"""Distributed containers for holding various types of analysis data.
Containers
==========
.. autosummary::
:toctree:
TimeStream
SiderealStream
GainData
StaticGainData
Map
MModes
RingMap
Container Base Classes
----------------------
.. autosummary::
:toctree:
ContainerBase
TODContainer
VisContainer
Helper Routines
---------------
These routines are designed to be replaced by other packages trying to insert
their own custom container types.
.. autosummary::
:toctree:
empty_like
empty_timestream
"""
import inspect
import numpy as np
from caput import memh5, tod
# Try to import bitshuffle to set the default compression options
try:
import bitshuffle.h5
COMPRESSION = bitshuffle.h5.H5FILTER
COMPRESSION_OPTS = (0, bitshuffle.h5.H5_COMPRESS_LZ4)
except ImportError:
COMPRESSION = None
COMPRESSION_OPTS = None
class ContainerBase(memh5.BasicCont):
    """A base class for pipeline containers.

    This class is designed to do much of the work of setting up pipeline
    containers. It should be derived from, and two variables set `_axes` and
    `_dataset_spec`. See the `Notes`_ section for details.

    Parameters
    ----------
    axes_from : `memh5.BasicCont`, optional
        Another container to copy axis definitions from. Must be supplied as
        keyword argument.
    attrs_from : `memh5.BasicCont`, optional
        Another container to copy attributes from. Must be supplied as keyword
        argument. This applies to attributes in default datasets too.
    skip_datasets : bool, optional
        Skip creating datasets. They must all be added manually with
        `.add_dataset` regardless of the entry in `.dataset_spec`. Default is False.
    kwargs : dict
        Should contain entries for all other axes.

    Notes
    -----
    Inheritance from other `ContainerBase` subclasses should work as expected,
    with datasets defined in super classes appearing as expected, and being
    overriden where they are redefined in the derived class.

    The variable `_axes` should be a tuple containing the names of axes that
    datasets in this container will use.

    The variable `_dataset_spec` should define the datasets. It's a dictionary
    with the name of the dataset as key. Each entry should be another
    dictionary, the entry 'axes' is mandatory and should be a list of the axes
    the dataset has (these should correspond to entries in `_axes`), as is
    `dtype` which should be a datatype understood by numpy. Other possible
    entries are:

    - `initialise` : if set to `True` the dataset will be created as the
      container is initialised.
    - `distributed` : the dataset will be distributed if the entry is `True`, if
      `False` it won't be, and if not set it will be distributed if the
      container is set to be.
    - `distributed_axis` : the axis to distribute over. Should be a name given
      in the `axes` entry.
    """

    # Overridden by subclasses to declare axis names and dataset layouts.
    _axes = ()
    _dataset_spec = {}

    # Ask memh5 to convert string types in attributes/datasets automatically.
    convert_attribute_strings = True
    convert_dataset_strings = True

    def __init__(self, *args, **kwargs):
        # Pull out the values of needed arguments
        axes_from = kwargs.pop("axes_from", None)
        attrs_from = kwargs.pop("attrs_from", None)
        skip_datasets = kwargs.pop("skip_datasets", False)
        dist = kwargs.pop("distributed", True)
        comm = kwargs.pop("comm", None)
        self.allow_chunked = kwargs.pop("allow_chunked", False)
        # Run base initialiser
        memh5.BasicCont.__init__(self, distributed=dist, comm=comm)
        # Check to see if this call looks like it was called like
        # memh5.MemDiskGroup would have been. If it is, we're probably trying to
        # create a bare container, so don't initialise any datasets. This
        # behaviour is needed to support tod.concatenate
        if len(args) or "data_group" in kwargs:
            return
        # Create axis entries
        for axis in self.axes:
            axis_map = None
            # Check if axis is specified in initialiser
            if axis in kwargs:
                # If axis is an integer, turn into an arange as a default definition
                if isinstance(kwargs[axis], int):
                    axis_map = np.arange(kwargs[axis])
                else:
                    axis_map = kwargs[axis]
            # If not set in the arguments copy from another object if set
            elif axes_from is not None and axis in axes_from.index_map:
                axis_map = axes_from.index_map[axis]
            # Set the index_map[axis] if we have a definition, otherwise throw an error
            if axis_map is not None:
                self.create_index_map(axis, axis_map)
            else:
                raise RuntimeError("No definition of axis %s supplied." % axis)
        # Iterate over datasets and initialise any that specify it
        if not skip_datasets:
            for name, spec in self.dataset_spec.items():
                if "initialise" in spec and spec["initialise"]:
                    self.add_dataset(name)
        # Copy over attributes
        if attrs_from is not None:
            # Copy attributes from container root
            memh5.copyattrs(attrs_from.attrs, self.attrs)
            # Copy attributes over from any common datasets
            for name in self.dataset_spec.keys():
                if name in self.datasets and name in attrs_from.datasets:
                    # Drop the "axis" attribute: this container's dataset may be
                    # laid out differently than the source's.
                    attrs_no_axis = {
                        k: v
                        for k, v in attrs_from.datasets[name].attrs.items()
                        if k != "axis"
                    }
                    memh5.copyattrs(attrs_no_axis, self.datasets[name].attrs)
        # Make sure that the __memh5_subclass attribute is accurate
        clspath = self.__class__.__module__ + "." + self.__class__.__name__
        clsattr = self.attrs.get("__memh5_subclass", None)
        if clsattr and (clsattr != clspath):
            self.attrs["__memh5_subclass"] = clspath

    def add_dataset(self, name):
        """Create an empty dataset.

        The dataset must be defined in the specification for the container.

        Parameters
        ----------
        name : string
            Name of the dataset to create.

        Returns
        -------
        dset : `memh5.MemDataset`

        Raises
        ------
        RuntimeError
            If `name` is not in `dataset_spec`, or a named axis has no entry
            in `index_map`.
        """
        # Dataset must be specified
        if name not in self.dataset_spec:
            raise RuntimeError("Dataset name not known.")
        dspec = self.dataset_spec[name]
        # Fetch dataset properties
        axes = dspec["axes"]
        dtype = dspec["dtype"]
        # Chunking/compression options are only honoured when the container
        # was created with allow_chunked=True.
        chunks, compression, compression_opts = None, None, None
        if self.allow_chunked:
            chunks = dspec.get("chunks", None)
            compression = dspec.get("compression", None)
            compression_opts = dspec.get("compression_opts", None)
        # Get distribution properties
        dist = self.distributed and dspec.get("distributed", True)
        shape = ()
        # Check that all the specified axes are defined, and fetch their lengths
        for axis in axes:
            if axis not in self.index_map:
                # An integer entry in `axes` denotes a fixed-length anonymous axis.
                if isinstance(axis, int):
                    l = axis
                else:
                    raise RuntimeError("Axis not defined in index_map")
            else:
                l = len(self.index_map[axis])
            shape += (l,)
        # Fetch distributed axis, and turn into axis index
        dist_axis = (
            dspec["distributed_axis"] if "distributed_axis" in dspec else axes[0]
        )
        dist_axis = list(axes).index(dist_axis)
        # Check chunk dimensions are consistent with axis
        if chunks is not None:
            final_chunks = ()
            for i, l in enumerate(shape):
                # Clamp each chunk dimension to the actual axis length.
                final_chunks += (min(chunks[i], l),)
            chunks = final_chunks
        # Create dataset
        dset = self.create_dataset(
            name,
            shape=shape,
            dtype=dtype,
            distributed=dist,
            distributed_axis=dist_axis,
            chunks=chunks,
            compression=compression,
            compression_opts=compression_opts,
        )
        # Record the axis names on the dataset so they survive a round trip.
        dset.attrs["axis"] = np.array(axes)
        return dset

    @property
    def datasets(self):
        """Return the datasets in this container.

        Do not try to add a new dataset by assigning to an item of this
        property. Use `create_dataset` instead.

        Returns
        -------
        datasets : read only dictionary
            Entries are :mod:`caput.memh5` datasets.
        """
        out = {}
        # Datasets are the non-group members of the root data group.
        for name, value in self._data.items():
            if not memh5.is_group(value):
                out[name] = value
        return memh5.ro_dict(out)

    @property
    def dataset_spec(self):
        """Return a copy of the fully resolved dataset specification as a
        dictionary.
        """
        ddict = {}
        # Iterate over the reversed MRO and look for _table_spec attributes
        # which get added to a temporary dict. We go over the reversed MRO so
        # that the `tdict.update` overrides tables in base classes.`
        for cls in inspect.getmro(self.__class__)[::-1]:
            try:
                # NOTE: this is a little ugly as the following line will drop
                # down to base classes if dataset_spec isn't present, and thus
                # try and `update` with the same values again.
                ddict.update(cls._dataset_spec)
            except AttributeError:
                pass
        # Add in any _dataset_spec found on the instance
        ddict.update(self.__dict__.get("_dataset_spec", {}))
        # Ensure that the dataset_spec is the same order on all ranks
        return {k: ddict[k] for k in sorted(ddict)}

    @classmethod
    def _class_axes(cls):
        """Return the set of axes for this container defined by this class and the base classes."""
        axes = set()
        # Iterate over the reversed MRO and look for _table_spec attributes
        # which get added to a temporary dict. We go over the reversed MRO so
        # that the `tdict.update` overrides tables in base classes.
        for c in inspect.getmro(cls)[::-1]:
            try:
                axes |= set(c._axes)
            except AttributeError:
                pass
        # This must be the same order on all ranks, so we need to explicitly sort to get around the
        # hash randomization
        return tuple(sorted(axes))

    @property
    def axes(self):
        """The set of axes for this container including any defined on the instance."""
        axes = set(self._class_axes())
        # Add in any axes found on the instance (this is needed to support the table classes where
        # the axes get added at run time)
        axes |= set(self.__dict__.get("_axes", []))
        # This must be the same order on all ranks, so we need to explicitly sort to get around the
        # hash randomization
        return tuple(sorted(axes))

    @classmethod
    def _make_selections(cls, sel_args):
        """
        Match down-selection arguments to axes of datasets.

        Parses sel_* argument and returns dict mapping dataset names to selections.

        Parameters
        ----------
        sel_args : dict
            Should contain valid numpy indexes as values and axis names (str) as keys.

        Returns
        -------
        dict
            Mapping of dataset names to numpy indexes for downselection of the data.
            Also includes another dict under the key "index_map" that includes
            the selections for those.
        """
        # Check if all those axes exist
        for axis in sel_args.keys():
            if axis not in cls._class_axes():
                raise RuntimeError("No '{}' axis found to select from.".format(axis))
        # Build selections dict
        selections = {}
        for name, dataset in cls._dataset_spec.items():
            ds_axes = dataset["axes"]
            sel = []
            # A dataset only needs a selection entry if at least one of its
            # axes is being down-selected; other axes get slice(None).
            ds_relevant = False
            for axis in ds_axes:
                if axis in sel_args:
                    sel.append(sel_args[axis])
                    ds_relevant = True
                else:
                    sel.append(slice(None))
            if ds_relevant:
                selections["/" + name] = tuple(sel)
        # add index maps selections
        for axis, sel in sel_args.items():
            selections["/index_map/" + axis] = sel
        return selections

    def copy(self, shared=None):
        """Copy this container, optionally sharing the source datasets.

        This routine will create a copy of the container. By default this is
        as full copy with the contents fully independent. However, a set of
        dataset names can be given that will share the same data as the
        source to save memory for large datasets. These will just view the
        same memory, so any modification to either the original or the copy
        will be visible to the other. This includes all write operations,
        addition and removal of attributes, redistribution etc. This
        functionality should be used with caution and clearly documented.

        Parameters
        ----------
        shared : list, optional
            A list of datasets whose content will be shared with the original.

        Returns
        -------
        copy : subclass of ContainerBase
            The copied container.
        """
        new_cont = self.__class__(
            attrs_from=self,
            axes_from=self,
            skip_datasets=True,
            distributed=self.distributed,
            comm=self.comm,
        )
        # Loop over datasets that exist in the source and either add a view of
        # the source dataset, or perform a full copy
        for name, data in self.datasets.items():
            if shared and name in shared:
                # TODO: find a way to do this that doesn't depend on the
                # internal implementation of BasicCont and MemGroup
                # NOTE: we don't use `.view()` on the RHS here as we want to
                # preserve the shared data through redistributions
                new_cont._data._get_storage()[name] = self._data._get_storage()[name]
            else:
                dset = new_cont.add_dataset(name)
                # Ensure that we have exactly the same distribution
                if dset.distributed:
                    dset.redistribute(data.distributed_axis)
                # Copy over the data and attributes
                dset[:] = data[:]
                memh5.copyattrs(data.attrs, dset.attrs)
        return new_cont
class TableBase(ContainerBase):
    """A base class for containers holding tables of data.

    Similar to the `ContainerBase` class, the container is defined through a
    dictionary given as a `_table_spec` class attribute. The container may also
    hold generic datasets by specifying `_dataset_spec` as with `ContainerBase`.
    See `Notes`_ for details.

    Parameters
    ----------
    axes_from : `memh5.BasicCont`, optional
        Another container to copy axis definitions from. Must be supplied as
        keyword argument.
    attrs_from : `memh5.BasicCont`, optional
        Another container to copy attributes from. Must be supplied as keyword
        argument. This applies to attributes in default datasets too.
    kwargs : dict
        Should contain definitions for all other table axes.

    Notes
    -----
    A `_table_spec` consists of a dictionary mapping table names into a
    description of the table. That description is another dictionary containing
    several entries.

    - `columns` : the set of columns in the table. Given as a list of
      `(name, dtype)` pairs.
    - `axis` : an optional name for the rows of the table. This is automatically
      generated as `'<tablename>_index'` if not explicitly set. This corresponds
      to an `index_map` entry on the container.
    - `initialise` : whether to create the table by default.
    - `distributed` : whether the table is distributed, or common across all MPI ranks.

    An example `_table_spec` entry is::

        _table_spec = {
            'quasars': {
                'columns': [
                    ('ra', np.float64),
                    ('dec', np.float64),
                    ('z', np.float64)
                ],
                'distributed': False,
                'axis': 'quasar_id'
            },
            'quasar_mask': {
                'columns': [
                    ('mask', bool)
                ],
                'axis': 'quasar_id'
            }
        }
    """

    _table_spec = {}

    def __init__(self, *args, **kwargs):
        # Get the dataset specification for this class (not any base classes), or
        # an empty dictionary if it does not exist. Do the same for the axes entry.
        # NOTE: take copies here -- the original code updated the class-level
        # dict/tuple in place, silently mutating shared class state on every
        # instantiation.
        dspec = dict(self.__class__.__dict__.get("_dataset_spec", {}))
        axes = tuple(self.__class__.__dict__.get("_axes", ()))
        # Iterate over all table_spec entries and construct dataset
        # specifications for them.
        for name, spec in self.table_spec.items():
            # Get the specified axis or if not present create a unique one for
            # this table entry
            axis = spec.get("axis", name + "_index")
            dtype = self._create_dtype(spec["columns"])
            _dataset = {
                "axes": [axis],
                "dtype": dtype,
                "initialise": spec.get("initialise", True),
                "distributed": spec.get("distributed", False),
                "distributed_axis": axis,
            }
            dspec[name] = _dataset
            if axis not in axes:
                axes += (axis,)
        # Instance-level spec/axes are picked up by ContainerBase.dataset_spec
        # and ContainerBase.axes respectively.
        self._dataset_spec = dspec
        self._axes = axes
        super(TableBase, self).__init__(*args, **kwargs)

    def _create_dtype(self, columns):
        """Turn a list of `(name, dtype)` column pairs into a numpy compound
        data type description.

        Raises
        ------
        ValueError
            If any column name is not a string.
        """
        dt = []
        for ci, (name, dtype) in enumerate(columns):
            if not isinstance(name, str):
                raise ValueError("Column %i is invalid" % ci)
            dt.append((name, dtype))
        return dt

    @property
    def table_spec(self):
        """Return a copy of the fully resolved table specification as a
        dictionary.
        """
        tdict = {}
        # Walk the reversed MRO so entries in subclasses override those in
        # base classes. (Module-level `inspect` import is used; the previous
        # local re-import was redundant.)
        for cls in inspect.getmro(self.__class__)[::-1]:
            try:
                tdict.update(cls._table_spec)
            except AttributeError:
                pass
        return tdict
class TODContainer(ContainerBase, tod.TOData):
    """A pipeline container for time ordered data.

    This works like a normal :class:`ContainerBase` container, with the added
    ability to be concatenated, and treated like a :class:`tod.TOData`
    instance.
    """

    _axes = ("time",)

    @property
    def time(self):
        """The time axis values.

        Returns the "ctime" field when the time index map is a structured
        array, otherwise the raw index map values.
        """
        try:
            return self.index_map["time"][:]["ctime"]
        # Need to check for both types as different numpy versions return
        # different exceptions.
        except (IndexError, ValueError):
            return self.index_map["time"][:]
class VisContainer(ContainerBase):
    """A base container for holding a visibility dataset.

    This works like a :class:`ContainerBase` container, with the
    ability to create visibility specific axes, if they are not
    passed as a kwargs parameter.

    Additionally this container has visibility specific defined properties
    such as 'vis', 'weight', 'freq', 'input', 'prod', 'stack',
    'prodstack', 'conjugate'.

    Parameters
    ----------
    axes_from : `memh5.BasicCont`, optional
        Another container to copy axis definitions from. Must be supplied as
        keyword argument.
    attrs_from : `memh5.BasicCont`, optional
        Another container to copy attributes from. Must be supplied as keyword
        argument. This applies to attributes in default datasets too.
    kwargs : dict
        Should contain entries for all other axes.
    """

    _axes = ("freq", "input", "prod", "stack")

    def __init__(self, *args, **kwargs):
        # Resolve product map
        prod = None
        if "prod" in kwargs:
            prod = kwargs["prod"]
        elif ("axes_from" in kwargs) and ("prod" in kwargs["axes_from"].index_map):
            prod = kwargs["axes_from"].index_map["prod"]
        # Resolve input map
        inputs = None
        if "input" in kwargs:
            inputs = kwargs["input"]
        elif ("axes_from" in kwargs) and ("input" in kwargs["axes_from"].index_map):
            inputs = kwargs["axes_from"].index_map["input"]
        # Resolve stack map
        stack = None
        if "stack" in kwargs:
            stack = kwargs["stack"]
        elif ("axes_from" in kwargs) and ("stack" in kwargs["axes_from"].index_map):
            stack = kwargs["axes_from"].index_map["stack"]
        # Automatically construct product map from inputs if not given:
        # all unique pairs (fi, fj) with fi <= fj.
        if prod is None and inputs is not None:
            nfeed = inputs if isinstance(inputs, int) else len(inputs)
            kwargs["prod"] = np.array(
                [[fi, fj] for fi in range(nfeed) for fj in range(fi, nfeed)]
            )
        # Default stack: the identity (one stack entry per product, unconjugated)
        if stack is None and prod is not None:
            stack = np.empty_like(prod, dtype=[("prod", "<u4"), ("conjugate", "u1")])
            stack["prod"][:] = np.arange(len(prod))
            stack["conjugate"] = 0
            kwargs["stack"] = stack
        # Call initializer from `ContainerBase`
        super(VisContainer, self).__init__(*args, **kwargs)
        reverse_map_stack = None
        # Create reverse map
        if "reverse_map_stack" in kwargs:
            # If axis is an integer, turn into an arange as a default definition
            if isinstance(kwargs["reverse_map_stack"], int):
                reverse_map_stack = np.arange(kwargs["reverse_map_stack"])
            else:
                reverse_map_stack = kwargs["reverse_map_stack"]
        # If not set in the arguments copy from another object if set
        elif ("axes_from" in kwargs) and ("stack" in kwargs["axes_from"].reverse_map):
            reverse_map_stack = kwargs["axes_from"].reverse_map["stack"]
        # Set the reverse_map['stack'] if we have a definition,
        # otherwise do NOT throw an error, errors are thrown in
        # classes that actually need a reverse stack
        if reverse_map_stack is not None:
            self.create_reverse_map("stack", reverse_map_stack)

    @property
    def vis(self):
        """The visibility like dataset."""
        return self.datasets["vis"]

    @property
    def weight(self):
        """The visibility weights."""
        return self.datasets["vis_weight"]

    @property
    def freq(self):
        """The frequency axis."""
        return self.index_map["freq"]["centre"]

    @property
    def input(self):
        """The correlated inputs."""
        return self.index_map["input"]

    @property
    def prod(self):
        """All the pairwise products that are represented in the data."""
        return self.index_map["prod"]

    @property
    def stack(self):
        """The stacks definition as an index (and conjugation) of a member product."""
        return self.index_map["stack"]

    @property
    def prodstack(self):
        """A pair of input indices representative of those in the stack.

        Note, these are correctly conjugated on return, and so calculations
        of the baseline and polarisation can be done without additionally
        looking up the stack conjugation.
        """
        if not self.is_stacked:
            return self.prod
        t = self.index_map["prod"][:][self.index_map["stack"]["prod"]]
        prodmap = t.copy()
        conj = self.stack["conjugate"]
        # Swap input_a/input_b wherever the representative product is conjugated.
        prodmap["input_a"] = np.where(conj, t["input_b"], t["input_a"])
        # FIX: this line was corrupted in the source (extraction residue);
        # restored to mirror the input_a assignment above, and the missing
        # `return prodmap` was restored.
        prodmap["input_b"] = np.where(conj, t["input_a"], t["input_b"])
        return prodmap

    @property
    def is_stacked(self):
        """Whether the data has been stacked (fewer stack entries than products).

        NOTE(review): this property was referenced by `prodstack` but lost to
        truncation in the source; restored with the standard definition --
        confirm against the upstream module.
        """
        return len(self.stack) != len(self.prod)
import photutils
from astropy.io import fits, ascii
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
import sys
import os
from pkg_resources import resource_filename
if 'DISPLAY' not in os.environ:
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib import gridspec
import glob
from photutils import CircularAperture, CircularAnnulus
from photutils import RectangularAperture
from photutils import aperture_photometry
import photutils
if photutils.__version__ > "1.0":
from . import fit_2dgauss
from photutils.centroids import centroid_2dg
else:
from photutils import centroid_2dg
import numpy as np
from astropy.time import Time
import astropy.units as u
import pdb
from copy import deepcopy
import yaml
import warnings
from scipy.stats import binned_statistic
from astropy.table import Table
import multiprocessing
from multiprocessing import Pool
import time
import logging
import urllib
import tqdm
maxCPUs = multiprocessing.cpu_count() // 3
try:
import bokeh.plotting
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.models import Range1d
from bokeh.models import WheelZoomTool
except ImportError as err2:
print("Could not import bokeh plotting. Interactive plotting may not work")
from .utils import robust_poly, robust_statistics
from .utils import get_baseDir
from .instrument_specific import rowamp_sub
def run_one_phot_method(allInput):
    """
    Do a photometry/spectroscopy method on one file
    For example, do aperture photometry on one file
    This is a slightly awkward workaround because multiprocessing doesn't work on object methods
    So it's a separate function that takes an object and runs the method

    Parameters
    -----------
    allInput: 3 part tuple (object, int, string)
        This contains the object, file index to run (0-based) and name of the method to run
    """
    # Unpack the work item, look up the named method on the object and call
    # it with the file index.
    target_obj, file_index, method_name = allInput
    bound_method = getattr(target_obj, method_name)
    return bound_method(file_index)
def run_multiprocessing_phot(photObj,fileIndices,method='phot_for_one_file'):
    """
    Run photometry/spectroscopy methods on all files using multiprocessing
    Awkward workaround because multiprocessing doesn't work on object methods

    Parameters
    ----------
    photObj: Photometry object
        A photometry Object instance
    fileIndices: list
        List of file indices
    method: str
        Method on which to apply multiprocessing

    Returns
    -------
    outputDat: list
        One result per file index, in the same order as `fileIndices`.

    Raises
    ------
    Exception
        If there are fewer files than worker CPUs (this can confuse
        multiprocessing).
    """
    allInput = [[photObj, oneInd, method] for oneInd in fileIndices]
    n_files = len(fileIndices)
    if n_files < maxCPUs:
        raise Exception("Fewer files to process than CPUs, this can confuse multiprocessing")
    # FIX: use the pool as a context manager so workers are cleaned up even
    # if a task raises; the original left the pool open on error.
    with Pool(maxCPUs) as p:
        outputDat = list(tqdm.tqdm(p.imap(run_one_phot_method, allInput), total=n_files))
    return outputDat
def read_yaml(filePath):
    """Parse a YAML file and return its contents as Python objects."""
    with open(filePath) as stream:
        return yaml.safe_load(stream)
path_to_example = "parameters/phot_params/example_phot_parameters.yaml"
exampleParamPath = resource_filename('tshirt',path_to_example)
class phot:
def __init__(self,paramFile=exampleParamPath,
directParam=None):
""" Photometry class
Parameters
------
paramFile: str
Location of the YAML file that contains the photometry parameters as long
as directParam is None. Otherwise, it uses directParam
directParam: dict
Rather than use the paramFile, you can put a dictionary here.
This can be useful for running a batch of photometric extractions.
Properties
-------
paramFile: str
Same as paramFile above
param: dict
The photometry parameters like file names, aperture sizes, guess locations
fileL: list
The files on which photometry will be performed
nImg: int
Number of images in the sequence
directParam: dict
Parameter dictionary rather than YAML file (useful for batch processing)
"""
self.pipeType = 'photometry'
self.get_parameters(paramFile=paramFile,directParam=directParam)
defaultParams = {'srcGeometry': 'Circular', 'bkgSub': True, 'isCube': False, 'cubePlane': 0,
'doCentering': True, 'bkgGeometry': 'CircularAnnulus',
'boxFindSize': 18,'backStart': 9, 'backEnd': 12,
'scaleAperture': False, 'apScale': 2.5, 'apRange': [0.01,9999],
'scaleBackground': False,
'nanTreatment': 'zero', 'backOffset': [0.0,0.0],
'srcName': 'WASP 62','srcNameShort': 'wasp62',
'refStarPos': [[50,50]],'procFiles': '*.fits',
'apRadius': 9,'FITSextension': 0,
'jdRef': 2458868,
'nightName': 'UT2020-01-20','srcName'
'FITSextension': 0, 'HEADextension': 0,
'refPhotCentering': None,'isSlope': False,
'itimeKeyword': 'INTTIME','readNoise': None,
'detectorGain': None,'cornerSubarray': False,
'subpixelMethod': 'exact','excludeList': None,
'dateFormat': 'Two Part','copyCentroidFile': None,
'bkgMethod': 'mean','diagnosticMode': False,
'bkgOrderX': 1, 'bkgOrderY': 1,'backsub_directions': ['Y','X'],
'readFromTshirtExamples': False,
'saturationVal': None, 'satNPix': 5, 'nanReplaceValue': 0.0,
'DATE-OBS': None,
'driftFile': None
}
for oneKey in defaultParams.keys():
if oneKey not in self.param:
self.param[oneKey] = defaultParams[oneKey]
xCoors, yCoors = [], []
positions = self.param['refStarPos']
self.nsrc = len(positions)
## Set up file names for output
self.check_file_structure()
self.dataFileDescrip = self.param['srcNameShort'] + '_'+ self.param['nightName']
self.photFile = os.path.join(self.baseDir,'tser_data','phot','phot_'+self.dataFileDescrip+'.fits')
self.centroidFile = os.path.join(self.baseDir,'centroids','cen_'+self.dataFileDescrip+'.fits')
self.refCorPhotFile = os.path.join(self.baseDir,'tser_data','refcor_phot','refcor_'+self.dataFileDescrip+'.fits')
# Get the file list
self.fileL = self.get_fileList()
self.nImg = len(self.fileL)
self.srcNames = np.array(np.arange(self.nsrc),dtype=str)
self.srcNames[0] = 'src'
self.set_up_apertures(positions)
self.check_parameters()
self.get_drift_dat()
def get_parameters(self,paramFile,directParam=None):
if directParam is None:
self.paramFile = paramFile
self.param = read_yaml(paramFile)
else:
self.paramFile = 'direct dictionary'
self.param = directParam
    def check_file_structure(self):
        """
        Check the file structure for plotting/saving data

        Ensures each directory listed in the packaged
        directory_list.yaml exists under the base data directory, then
        records that base directory as `self.baseDir`.
        """
        baseDir = get_baseDir()
        structure_file = resource_filename('tshirt','directory_info/directory_list.yaml')
        dirList = read_yaml(structure_file)
        for oneFile in dirList:
            fullPath = os.path.join(baseDir,oneFile)
            # NOTE(review): `ensure_directories_are_in_place` is not imported or
            # defined anywhere visible in this module -- confirm it is provided
            # elsewhere (e.g. tshirt utils), otherwise this raises NameError.
            ensure_directories_are_in_place(fullPath)
        self.baseDir = baseDir
def get_fileList(self):
if self.param['readFromTshirtExamples'] == True:
## Find the files from the package data examples
## This is only when running example pipeline runs or tests
search_path = os.path.join(self.baseDir,'example_tshirt_data',self.param['procFiles'])
if len(glob.glob(search_path)) == 0:
print("Did not find example tshirt data. Now attempting to download...")
get_tshirt_example_data()
else:
search_path = self.param['procFiles']
origList = np.sort(glob.glob(search_path))
if self.param['excludeList'] is not None:
fileList = []
for oneFile in origList:
if os.path.basename(oneFile) not in self.param['excludeList']:
fileList.append(oneFile)
else:
fileList = origList
if len(fileList) == 0:
print("Note: File Search comes up empty")
if os.path.exists(self.photFile):
print("Note: Reading file list from previous phot file instead.")
t1 = Table.read(self.photFile,hdu='FILENAMES')
fileList = np.array(t1['File Path'])
return fileList
def check_parameters(self):
assert type(self.param['backOffset']) == list,"Background offset is not a list"
assert len(self.param['backOffset']) == 2,'Background offset must by a 2 element list'
def set_up_apertures(self,positions):
if self.param['srcGeometry'] == 'Circular':
self.srcApertures = CircularAperture(positions,r=self.param['apRadius'])
elif self.param['srcGeometry'] == 'Square':
self.srcApertures = RectangularAperture(positions,w=self.param['apRadius'],
h=self.param['apRadius'],theta=0)
elif self.param['srcGeometry'] == 'Rectangular':
self.srcApertures = RectangularAperture(positions,w=self.param['apWidth'],
h=self.param['apHeight'],theta=0)
else:
print('Unrecognized aperture')
self.xCoors = self.srcApertures.positions[:,0]
self.yCoors = self.srcApertures.positions[:,1]
if self.param['bkgSub'] == True:
bkgPositions = np.array(deepcopy(positions))
bkgPositions[:,0] = bkgPositions[:,0] + self.param['backOffset'][0]
bkgPositions[:,1] = bkgPositions[:,1] + self.param['backOffset'][1]
if self.param['bkgGeometry'] == 'CircularAnnulus':
self.bkgApertures = CircularAnnulus(bkgPositions,r_in=self.param['backStart'],
r_out=self.param['backEnd'])
elif self.param['bkgGeometry'] == 'Rectangular':
self.bkgApertures = RectangularAperture(bkgPositions,w=self.param['backWidth'],
h=self.param['backHeight'],theta=0)
else:
raise ValueError('Unrecognized background geometry')
    def get_default_index(self):
        """
        Get the default index from the file list

        Returns
        -------
        int
            The middle image index (`nImg // 2`), used as a representative
            frame for previews and postage stamps.
        """
        return self.nImg // 2
def get_default_im(self,img=None,head=None):
""" Get the default image for postage stamps or star identification maps"""
## Get the data
if img is None:
img, head = self.getImg(self.fileL[self.get_default_index()])
return img, head
def get_default_cen(self,custPos=None,ind=0):
"""
Get the default centroids for postage stamps or star identification maps
Parameters
----------
custPos: numpy array
Array of custom positions for the apertures. Otherwise it uses the guess position
ind: int
Image index. This is used to guess the position if a drift file is given
"""
if custPos is None:
initialPos = deepcopy(self.srcApertures.positions)
showApPos = np.zeros_like(initialPos)
showApPos[:,0] = initialPos[:,0] + float(self.drift_dat['dx'][ind])
showApPos[:,1] = initialPos[:,1] + float(self.drift_dat['dy'][ind])
else:
showApPos = custPos
return showApPos
def get_drift_dat(self):
drift_dat_0 = Table()
drift_dat_0['Index'] = np.arange(self.nImg)
#drift_dat_0['File'] = self.fileL
drift_dat_0['dx'] = np.zeros(self.nImg)
drift_dat_0['dy'] = np.zeros(self.nImg)
if self.param['driftFile'] == None:
self.drift_dat = drift_dat_0
drift_file_found = False
else:
if self.param['readFromTshirtExamples'] == True:
## Find the files from the package data examples
## This is only when running example pipeline runs or tests
drift_file_path = os.path.join(self.baseDir,'example_tshirt_data',self.param['driftFile'])
else:
drift_file_path = self.param['driftFile']
if os.path.exists(drift_file_path) == False:
drift_file_found = False
warnings.warn("No Drift file found at {}".format(drift_file_path))
else:
drift_file_found = True
self.drift_dat = ascii.read(drift_file_path)
return drift_file_found
    def make_drift_file(self,srcInd=0,refIndex=0):
        """
        Use the centroids in photometry to generate a drift file of X/Y offsets

        Parameters
        ----------
        srcInd: int
            The source index used for drifts
        refIndex: int
            Which file index corresponds to 0.0 drift
        """
        # Read the centroid time series from the saved photometry file
        HDUList = fits.open(self.photFile)
        cenData = HDUList['CENTROIDS'].data
        photHead = HDUList['PHOTOMETRY'].header
        nImg = photHead['NIMG']
        drift_dat = Table()
        drift_dat['Index'] = np.arange(nImg)
        # Drifts are offsets relative to the reference image's centroid
        x = cenData[:,srcInd,0]
        drift_dat['dx'] = x - x[refIndex]
        y = cenData[:,srcInd,1]
        drift_dat['dy'] = y - y[refIndex]
        drift_dat['File'] = HDUList['FILENAMES'].data['File Path']
        outPath = os.path.join(self.baseDir,'centroids','drift_'+self.dataFileDescrip+'.ecsv')
        # Record provenance in the table metadata
        drift_dat.meta['Zero Index'] = refIndex
        drift_dat.meta['Source Used'] = srcInd
        drift_dat.meta['Zero File'] = str(drift_dat['File'][refIndex])
        print("Saving Drift file to {}".format(outPath))
        drift_dat.write(outPath,overwrite=True,format='ascii.ecsv')
def showStarChoices(self,img=None,head=None,custPos=None,showAps=False,
                    srcLabel=None,figSize=None,showPlot=False,
                    apColor='black',backColor='black',
                    vmin=None,vmax=None,index=None,
                    labelColor='white',
                    xLim=None,yLim=None,
                    txtOffset=20):
    """
    Show the star choices for photometry

    Parameters
    ------------------
    img : numpy 2D array, optional
        An image to plot
    head : astropy FITS header, optional
        header for image
    custPos : numpy 2D array or list of tuple coordinates, optional
        Custom positions
    showAps : bool, optional
        Show apertures rather than circle stars
    srcLabel : str or None, optional
        What should the source label be? The default is "src"
    figSize : list or None, optional
        Specify the size of the plot.
        This is useful for looking at high/lower resolution
    showPlot : bool
        Show the plot? If True, it will show, otherwise it is saved as a file
    apColor: str
        The color for the source apertures
    backColor: str
        The color for the background apertures
    vmin: float or None
        A value for the :code:`matplotlib.pyplot.plot.imshow` vmin parameter
    vmax: float or None
        A value for the :code:`matplotlib.pyplot.plot.imshow` vmax parameter
    index: int or None
        The index of the file name. If None, it uses the default
    labelColor: str
        Color for the text label for sources
    xLim: None or two element list
        Specify the minimum and maximum X for the plot. For example xLim=[40,60]
    yLim: None or two element list
        Specify the minimum and maximum Y for the plot. For example yLim=[40,60]
    txtOffset: float
        The X and Y offset to place the text label for a source
    """
    fig, ax = plt.subplots(figsize=figSize)

    if index is None:
        index = self.get_default_index()
    if img is None:
        img, head = self.getImg(self.fileL[index])
    else:
        # NOTE(review): the image returned here is discarded (img_other is
        # never used); only the header is taken. Confirm this is intended.
        img_other, head = self.get_default_im(img=img,head=None)

    # Color stretch defaults to the 1st-99th percentile unless overridden.
    if vmin is None:
        useVmin = np.nanpercentile(img,1)
    else:
        useVmin = vmin
    if vmax is None:
        useVmax = np.nanpercentile(img,99)
    else:
        useVmax = vmax

    imData = ax.imshow(img,cmap='viridis',vmin=useVmin,vmax=useVmax,interpolation='nearest')
    ax.invert_yaxis()
    rad = 50 ## the radius for the matplotlib scatter to show source centers

    # Positions to display: custom if supplied, else the default centroids.
    showApPos = self.get_default_cen(custPos=custPos,ind=index)
    if showAps == True:
        # Draw copies of the photometric apertures at the display positions.
        apsShow = deepcopy(self.srcApertures)
        apsShow.positions = showApPos
        self.adjust_apertures(index)

        # photutils renamed plot()'s `ax` keyword to `axes` in version 0.7.
        if photutils.__version__ >= "0.7":
            apsShow.plot(axes=ax,color=apColor)
        else:
            apsShow.plot(ax=ax,color=apColor)
        if self.param['bkgSub'] == True:
            # Background apertures are shifted by the configured offset.
            backApsShow = deepcopy(self.bkgApertures)
            backApsShow.positions = showApPos
            backApsShow.positions[:,0] = backApsShow.positions[:,0] + self.param['backOffset'][0]
            backApsShow.positions[:,1] = backApsShow.positions[:,1] + self.param['backOffset'][1]

            if photutils.__version__ >= "0.7":
                backApsShow.plot(axes=ax,color=backColor)
            else:
                backApsShow.plot(ax=ax,color=backColor)
        outName = 'ap_labels_{}.pdf'.format(self.dataFileDescrip)
    else:
        # Just circle the star centers.
        ax.scatter(showApPos[:,0],showApPos[:,1], s=rad, facecolors='none', edgecolors='r')
        outName = 'st_labels_{}.pdf'.format(self.dataFileDescrip)

    # Label the first source "src" (or a custom label), the rest by index.
    for ind, onePos in enumerate(showApPos):
        #circ = plt.Circle((onePos[0], onePos[1]), rad, color='r')
        #ax.add_patch(circ)
        if ind == 0:
            if srcLabel is None:
                name='src'
            else:
                name=srcLabel
        else:
            name=str(ind)
        ax.text(onePos[0]+txtOffset,onePos[1]+txtOffset,name,color=labelColor)

    ax.set_xlabel('X (px)')
    ax.set_ylabel('Y (px)')
    # Attach the colorbar on a divider axis so it matches the image height.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(imData,label='Counts',cax=cax)

    ax.set_xlim(xLim)
    ax.set_ylim(yLim)

    if showPlot == True:
        fig.show()
    else:
        outF = os.path.join(self.baseDir,'plots','photometry','star_labels',outName)
        fig.savefig(outF,
                    bbox_inches='tight')
        plt.close(fig)
def showStamps(self,img=None,head=None,custPos=None,custFWHM=None,
vmin=None,vmax=None,showPlot=False,boxsize=None,index=None):
"""
Shows the fixed apertures on the image with postage stamps surrounding sources
Parameters
-----------
index: int
Index of the file list. This is needed if scaling apertures
"""
## Calculate approximately square numbers of X & Y positions in the grid
numGridY = int(np.floor(np.sqrt(self.nsrc)))
numGridX = int(np.ceil(float(self.nsrc) / float(numGridY)))
fig, axArr = plt.subplots(numGridY, numGridX)
img, head = self.get_default_im(img=img,head=head)
if boxsize == None:
boxsize = self.param['boxFindSize']
showApPos = self.get_default_cen(custPos=custPos)
if index is None:
index = self.get_default_index()
self.adjust_apertures(index)
for ind, onePos in enumerate(showApPos):
if self.nsrc == 1:
ax = axArr
else:
ax = axArr.ravel()[ind]
yStamp_proposed = np.array(onePos[1] + | np.array([-1,1]) | numpy.array |
from __future__ import annotations
#import abc
from abc import ABC, abstractmethod
import copy
import numpy as np
import pandas as pd
import hotstepper.analysis as analysis
import hotstepper.mixins as mixins
from hotstepper.core.data_model import DataModel
from hotstepper.basis.Basis import Basis
from hotstepper.basis.Bases import Bases
from hotstepper.utilities.helpers import (
get_epoch_start,
get_epoch_end,
prepare_input,
get_clean_step_data,
prepare_datetime,
process_slice,
get_datetime)
class AbstractSteps(ABC):
"""
The base class that defines the steps object interface, base properties and methods expected of all derived classes.
"""
__slots__ = ('_start','_using_dt','_end','_basis','_base','_step_data','_ts_scale','_all_data')
def __init__(self,use_datetime=False,basis=None):
    """
    Parameters
    ===========
    use_datetime : bool, Optional
        Treat step keys as datetimes (converted to/from floats internally).
    basis : Basis, Optional
        Step basis function; defaults to ``Basis()``.
    """
    super().__init__()
    self._step_data = None
    self._all_data = None

    if basis is None:
        basis = Basis()
    self._basis = basis
    self._using_dt = use_datetime
    # Callable used to evaluate the steps function (see step()).
    self._base = basis.base()
    self._ts_scale = 1
# Expected methods of parent classes
@abstractmethod
def __repr__(self):
    """Concrete subclasses must provide their own string representation."""
    pass
def compare(self,other):
    """
    Compare the steps function with another to determine if the two are
    equivalent based on their cumulative values and step keys.

    Parameters
    ===========
    other : AbstractSteps
        The other steps object to compare this one to.

    Returns
    ========
    bool
        Indication if the two steps are equivalent or not.

    """
    st_this_keys = self.step_keys()
    st_this_values = self.step_values()

    # If other implements the AbstractSteps interface, compare both keys and
    # cumulative values; otherwise broadcast-compare the values against it.
    if type(self).__base__ == type(other).__base__:
        st_that_keys = other.step_keys()
        st_that_values = other.step_values()

        return np.array_equal(st_this_keys, st_that_keys) and np.array_equal(st_this_values,st_that_values)
    else:
        return (st_this_values==other).all()
def step_data(self,delta_values=False,convert_keys=False):
    """
    A clean multi-dimensional numpy array of the step keys and either the
    cumulative values or the step change values, all in floats and ready to
    use in further analysis.

    .. note::
        This function returns a dataset that can directly be consumed by
        numpy, Sklearn and similar packages for forecasting or analysis.

    Parameters
    ===========
    delta_values : bool, Optional
        Return the step delta changes instead of the cumulative total at each step key.

    convert_keys : bool Optional
        If the keys are datetime, they will be converted, else they will remain floats.

    Returns
    ========
    array

    """
    if delta_values:
        nice_data = np.copy(self._all_data[:,[DataModel.START.value,DataModel.DIRECTION.value]])
    else:
        nice_data = np.copy(self._all_data[:,[DataModel.START.value,DataModel.WEIGHT.value]])

    # Trim the synthetic -inf/+inf bounding entries if present.
    if nice_data[0,DataModel.START.value] == get_epoch_start(False):
        nice_data = nice_data[1:]
    if nice_data[-1,DataModel.START.value] == get_epoch_end(False):
        nice_data = nice_data[:-1]

    if convert_keys and self._using_dt:
        nice_data = np.array(list(zip(prepare_datetime(nice_data[:,DataModel.START.value]),nice_data[:,DataModel.DIRECTION.value])))
        if nice_data[0,DataModel.START.value] == get_epoch_start():
            nice_data[0,DataModel.START.value] = nice_data[1,DataModel.START.value]

    # BUG FIX: previously an `else` branch returned only the raw key column
    # (copied from step_keys) whenever convert_keys was False — the default —
    # discarding the (key, value) pairs built above and contradicting the
    # documented return value. Always return the assembled 2-D array.
    return nice_data
def iloc(self,idx,raw_keys=True):
    """
    The individual step changes at each array index, these are the delta
    values that add and subtract across the series to realise the entire
    step function.

    Parameters
    ============
    idx : int, slice
        The numpy index, range index or slice to lookup the raw step change
        values within the Steps DataModel

    Returns
    ========
    array
        Individual step change values within the steps object.

    """
    wanted_columns = [
        DataModel.START.value,
        DataModel.DIRECTION.value,
        DataModel.WEIGHT.value,
    ]
    return np.copy(self._all_data[idx, wanted_columns])
def step_changes(self):
    """
    The individual step changes at each key value, these are the delta
    values that add and subtract across the series to realise the entire
    step function.

    Returns
    ========
    array
        Individual step change values within the steps object.

    """
    return self._all_data[:,DataModel.DIRECTION.value]
def first(self):
    """
    The first key or start value of the steps, if the steps extend to
    negative infinity, the first value will be the first finite key value.

    Returns
    ========
    int, float or datetime
        First finite key value of the steps.

    """
    if not self._using_dt:
        return self._start
    return get_datetime(self._start)
def last(self):
    """
    The last key or start value of the steps, if the steps extend to
    positive infinity, the last value will be the last finite key value.

    Returns
    ========
    int, float or datetime
        Last finite key value of the steps.

    """
    if not self._using_dt:
        return self._end
    return get_datetime(self._end)
def step_values(self):
    """
    The cumulative step values at each key value.

    Returns
    ========
    array
        Cumulative steps value at each step key within the steps object

    """
    return self._all_data[:,DataModel.WEIGHT.value]
def step_keys(self,convert_keys=False):
    """
    The step key values within this object, can be returned either in raw
    float format or converted if using datetime.

    Parameters
    ===========
    convert_keys : bool Optional
        If the keys are datetime, they will be converted, else they will remain floats.

    Returns
    ========
    array
        Step keys

    """
    if convert_keys and self._using_dt:
        keys = prepare_datetime(self._all_data[:,DataModel.START.value],self._using_dt)
        # The first key may be the synthetic epoch-start sentinel; replace
        # it with the first real key so consumers don't see -infinity.
        if keys[0] == get_epoch_start():
            keys[0] = keys[1]
        return keys
    else:
        return self._all_data[:,DataModel.START.value]
def __getitem__(self,x):
    """Index/slice access evaluates the steps function via ``fast_step``."""
    x = process_slice(x)
    return self.fast_step(x)
def __call__(self,x):
    """Calling the object evaluates the steps function via ``fast_step``."""
    return self.fast_step(x)
def step(self, xdata,process_input=True):
    """
    This is a mathematical function definition of the Steps object, this is
    a dynamically created formula representation that can be passed an array
    of values to evaluate the steps function at.

    Parameters
    ===========
    xdata : array_like(int, float, datetime)
        The values the steps function is the be evaluated at using the
        assigned mathematical basis function.

    process_input : bool, Optional
        Indicate if the input data needs processing, to convert datetimes to
        floats for calculation. Primarily used internally to avoid
        converting input data twice.

    Returns
    ========
    array
        The values of the cummulative steps function evaluated at the
        provided input (x axis) values.

    See Also
    =========
    fast_step
    smooth_step

    """
    #if we are using default basis, get answer even quicker
    # if self._basis.name == 'Heaviside' and self._all_data.shape[0] != 1:
    #     return self.fast_step(xdata=xdata,process_input=process_input)

    if process_input:
        x = prepare_input(xdata)
    else:
        x = xdata

    if self._step_data.shape[0] > 0:
        # Evaluate using the assigned basis callable.
        result = self._base(x,self._step_data,self._basis.param)
        # Non-Heaviside bases can misbehave at the -inf sentinel key, so
        # copy the first finite value over it.
        if (self._basis.name != 'Heaviside') and (x[0] == get_epoch_start(False)):
            result[0] = result[1]
    else:
        # No step data yet: the function is identically zero.
        return np.zeros(len(x))

    return result
def fast_step(self,xdata,process_input=True,side='right'):
"""
This will evaluate the cummulative steps function at the provided input values. This function ignores the assigned basis and performs some numpy trickery to improve performance.
.. note::
This function will ignore the assigned basis and evaluate the cummulative function directly, to ensure the assigned basis is used, please use the `step` function.
Parameters
==========
xdata : array_like(int, float, datetime)
The values the steps function is to be evaluated at.
process_input : bool, Optional
Indicate if the input data needs processing, to convert datetimes to floats for calculation. Primarily used internally to avoid converting input data twice.
side : {'right', 'left'}, Optional
Location to evaluate the steps function relative to the step location. Default is *'right'*, which means the step assumes the weight value on and after the step key value.
Returns
========
array
The values of the cummulative steps function evaluated at the provided input (x axis) values.
See Also
=========
step
smooth_step
"""
if process_input:
x = prepare_input(xdata)
else:
x = xdata
search_data = np.concatenate([self.step(np.array([get_epoch_start(False)]),False),self._all_data[:,DataModel.WEIGHT.value]])
if self._all_data.shape[0] < 5:
return self.step(x)
#improves lookup performance, just need an extra check to avoid over/under run
limit = search_data.shape[0]
idxs = np.searchsorted(self._all_data[:,DataModel.START.value],x,side=side)
return search_data[ | np.clip(idxs,0,limit) | numpy.clip |
import cv2
import h5py
import imageio
import keras
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from IPython.display import Image
from keras import backend as K
from keras.engine import Input, Model
from keras.layers import (
Activation,
Conv3D,
Deconvolution3D,
MaxPooling3D,
UpSampling3D,
)
from keras.layers.merge import concatenate
from keras.optimizers import Adam
from keras.utils import to_categorical
from tensorflow.compat.v1.logging import INFO, set_verbosity
# Emit TensorFlow log messages at INFO level and above.
set_verbosity(INFO)

# All Keras layers in this module assume (channels, x, y, z) ordering.
K.set_image_data_format("channels_first")
def plot_image_grid(image):
    """Plot six random coronal, transversal and sagittal slices of a volume.

    One row of the 3x6 grid per anatomical plane; each column shows a
    randomly chosen slice along that plane.
    """
    data_all = [image]

    fig, ax = plt.subplots(3, 6, figsize=[16, 9])

    # (row label, rotated plane array) for each anatomical orientation.
    planes = [
        ('Coronal', np.rot90(np.transpose(data_all, [1, 3, 2, 4, 0]), 1)),
        ('Transversal', np.rot90(np.transpose(data_all, [2, 1, 3, 4, 0]), 2)),
        ('Sagittal', np.rot90(np.transpose(data_all, [2, 3, 1, 4, 0]), 1)),
    ]

    for row, (label, plane) in enumerate(planes):
        for col in range(6):
            n = np.random.randint(plane.shape[2])
            panel = ax[row][col]
            panel.imshow(np.squeeze(plane[:, :, n, :]))
            panel.set_xticks([])
            panel.set_yticks([])
            if col == 0:
                panel.set_ylabel(label, fontsize=15)

    fig.subplots_adjust(wspace=0, hspace=0)
def visualize_data_gif(data_):
    """Render an animated GIF sweeping through the three axes of a volume.

    Frame i shows the i-th slice along each axis (clamped to the last
    slice), concatenated side by side. Returns an IPython Image of the GIF.
    """
    last = [size - 1 for size in data_.shape[:3]]
    images = []
    for i in range(data_.shape[0]):
        x_slice = data_[min(i, last[0]), :, :]
        y_slice = data_[:, min(i, last[1]), :]
        z_slice = data_[:, :, min(i, last[2])]
        images.append(np.concatenate((x_slice, y_slice, z_slice), axis=1))
    imageio.mimsave("/tmp/gif.gif", images, duration=0.01)
    return Image(filename="/tmp/gif.gif", format='png')
# Some code was borrowed from:
# https://github.com/ellisdg/3DUnetCNN/blob/master/unet3d/
def create_convolution_block(input_layer, n_filters, batch_normalization=False,
                             kernel=(3, 3, 3), activation=None,
                             padding='same', strides=(1, 1, 1),
                             instance_normalization=False):
    """
    Create a Conv3D layer followed by an activation.

    :param strides: convolution strides
    :param input_layer: input Keras tensor
    :param n_filters: number of convolution filters
    :param batch_normalization: NOTE(review): accepted but currently ignored —
        no normalization layer is ever applied in this body; confirm before
        relying on it.
    :param kernel: 3D convolution kernel size
    :param activation: Keras activation layer to use. (default is 'relu')
    :param padding: convolution padding mode
    :param instance_normalization: NOTE(review): accepted but currently ignored.
    :return: output Keras tensor after convolution and activation
    """
    layer = Conv3D(n_filters, kernel, padding=padding, strides=strides)(
        input_layer)
    if activation is None:
        return Activation('relu')(layer)
    else:
        return activation()(layer)
def get_up_convolution(n_filters, pool_size, kernel_size=(2, 2, 2),
                       strides=(2, 2, 2),
                       deconvolution=False):
    """Return the upscaling layer for the decoder path.

    Uses a transpose convolution when ``deconvolution`` is True, otherwise a
    plain (parameter-free) up-sampling layer.
    """
    if not deconvolution:
        return UpSampling3D(size=pool_size)
    return Deconvolution3D(filters=n_filters, kernel_size=kernel_size,
                           strides=strides)
def unet_model_3d(loss_function, input_shape=(4, 160, 160, 16),
                  pool_size=(2, 2, 2), n_labels=3,
                  initial_learning_rate=0.00001,
                  deconvolution=False, depth=4, n_base_filters=32,
                  include_label_wise_dice_coefficients=False, metrics=None,
                  batch_normalization=False, activation_name="sigmoid"):
    """
    Builds the 3D UNet Keras model.

    :param metrics: Metric (or list of metrics) to be calculated during model training.
    :param include_label_wise_dice_coefficients: NOTE(review): accepted but
        currently unused in this body; confirm before relying on it.
    :param n_base_filters: The number of filters that the first layer in the convolution network will have. Following
    layers will contain a multiple of this number. Lowering this number will likely reduce the amount of memory required
    to train the model.
    :param depth: indicates the depth of the U-shape for the model. The greater the depth, the more max pooling
    layers will be added to the model. Lowering the depth may reduce the amount of memory required for training.
    :param input_shape: Shape of the input data (n_chanels, x_size, y_size, z_size). The x, y, and z sizes must be
    divisible by the pool size to the power of the depth of the UNet, that is pool_size^depth.
    :param pool_size: Pool size for the max pooling operations.
    :param n_labels: Number of binary labels that the model is learning.
    :param initial_learning_rate: Initial learning rate for the model. This will be decayed during training.
    :param deconvolution: If set to True, will use transpose convolution(deconvolution) instead of up-sampling. This
    increases the amount memory required during training.
    :return: Untrained 3D UNet Model
    """
    # Avoid the mutable-default-argument anti-pattern (was `metrics=[]`);
    # also accept a single metric, wrapping it in a list as before.
    if metrics is None:
        metrics = []
    elif not isinstance(metrics, list):
        metrics = [metrics]

    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    # add levels with max pooling (encoder / contracting path)
    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer,
                                          n_filters=n_base_filters * (
                                              2 ** layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1,
                                          n_filters=n_base_filters * (
                                              2 ** layer_depth) * 2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    # add levels with up-convolution or up-sampling (decoder / expanding path)
    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = get_up_convolution(pool_size=pool_size,
                                            deconvolution=deconvolution,
                                            n_filters=
                                            current_layer._keras_shape[1])(
            current_layer)
        # Skip connection: concatenate with the matching encoder level.
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(
            n_filters=levels[layer_depth][1]._keras_shape[1],
            input_layer=current_layer,
            batch_normalization=batch_normalization)

    # 1x1x1 convolution maps features to per-label activations.
    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation(activation_name)(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=loss_function,
                  metrics=metrics)
    return model
def visualize_patch(X, y):
    """Show the first slice of an image patch and its label side by side."""
    fig, ax = plt.subplots(1, 2, figsize=[10, 5], squeeze=False)
    for col, img in enumerate((X, y)):
        panel = ax[0][col]
        panel.imshow(img[:, :, 0], cmap='Greys_r')
        panel.set_xticks([])
        panel.set_yticks([])
    fig.subplots_adjust(wspace=0, hspace=0)
class VolumeDataGenerator(keras.utils.Sequence):
    """Keras Sequence that streams (X, y) volume batches from HDF5 files.

    Each sample file is expected to contain an "x" dataset of shape
    (num_channels, *dim) and a "y" dataset whose background class
    (channel 0) is dropped before training.
    """

    def __init__(self,
                 sample_list,
                 base_dir,
                 batch_size=1,
                 shuffle=True,
                 dim=(160, 160, 16),
                 num_channels=4,
                 num_classes=3,
                 verbose=1):
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.base_dir = base_dir
        self.dim = dim
        self.num_channels = num_channels
        self.num_classes = num_classes
        self.verbose = verbose
        self.sample_list = sample_list
        self.on_epoch_end()

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.sample_list))
        if self.shuffle:  # truthiness instead of `== True`
            np.random.shuffle(self.indexes)

    def __len__(self):
        'Denotes the number of batches per epoch'
        # Incomplete trailing batches are dropped.
        return int(np.floor(len(self.sample_list) / self.batch_size))

    def __data_generation(self, list_IDs_temp):
        'Generates data containing batch_size samples'
        # Initialization
        X = np.zeros((self.batch_size, self.num_channels, *self.dim),
                     dtype=np.float64)
        y = np.zeros((self.batch_size, self.num_classes, *self.dim),
                     dtype=np.float64)

        # Generate data
        for i, ID in enumerate(list_IDs_temp):
            if self.verbose == 1:
                # The original `"Training on: %s" % self.base_dir + ID`
                # relied on operator precedence; made the intent explicit.
                print("Training on: %s" % (self.base_dir + ID))
            with h5py.File(self.base_dir + ID, 'r') as f:
                X[i] = np.array(f.get("x"))
                # remove the background class (channel 0)
                y[i] = np.moveaxis(np.array(f.get("y")), 3, 0)[1:]

        return X, y

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indexes of the batch
        indexes = self.indexes[
            index * self.batch_size: (index + 1) * self.batch_size]
        # Find list of IDs
        sample_list_temp = [self.sample_list[k] for k in indexes]
        # Generate data
        X, y = self.__data_generation(sample_list_temp)
        return X, y
def get_labeled_image(image, label, is_categorical=False):
if not is_categorical:
label = to_categorical(label, num_classes=4).astype(np.uint8)
image = cv2.normalize(image[:, :, :, 0], None, alpha=0, beta=255,
norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F).astype(
np.uint8)
labeled_image = np.zeros_like(label[:, :, :, 1:])
# remove tumor part from image
labeled_image[:, :, :, 0] = image * (label[:, :, :, 0])
labeled_image[:, :, :, 1] = image * (label[:, :, :, 0])
labeled_image[:, :, :, 2] = image * (label[:, :, :, 0])
# color labels
labeled_image += label[:, :, :, 1:] * 255
return labeled_image
def predict_and_viz(image, label, model, threshold, loc=(100, 100, 50)):
image_labeled = get_labeled_image(image.copy(), label.copy())
model_label = np.zeros([3, 320, 320, 160])
for x in range(0, image.shape[0], 160):
for y in range(0, image.shape[1], 160):
for z in range(0, image.shape[2], 16):
patch = np.zeros([4, 160, 160, 16])
p = np.moveaxis(image[x: x + 160, y: y + 160, z:z + 16], 3, 0)
patch[:, 0:p.shape[1], 0:p.shape[2], 0:p.shape[3]] = p
pred = model.predict(np.expand_dims(patch, 0))
model_label[:, x:x + p.shape[1],
y:y + p.shape[2],
z: z + p.shape[3]] += pred[0][:, :p.shape[1], :p.shape[2],
:p.shape[3]]
model_label = np.moveaxis(model_label[:, 0:240, 0:240, 0:155], 0, 3)
model_label_reformatted = np.zeros((240, 240, 155, 4))
model_label_reformatted = to_categorical(label, num_classes=4).astype(
np.uint8)
model_label_reformatted[:, :, :, 1:4] = model_label
model_labeled_image = get_labeled_image(image, model_label_reformatted,
is_categorical=True)
fig, ax = plt.subplots(2, 3, figsize=[10, 7])
# plane values
x, y, z = loc
ax[0][0].imshow(np.rot90(image_labeled[x, :, :, :]))
ax[0][0].set_ylabel('Ground Truth', fontsize=15)
ax[0][0].set_xlabel('Sagital', fontsize=15)
ax[0][1].imshow(np.rot90(image_labeled[:, y, :, :]))
ax[0][1].set_xlabel('Coronal', fontsize=15)
ax[0][2].imshow(np.squeeze(image_labeled[:, :, z, :]))
ax[0][2].set_xlabel('Transversal', fontsize=15)
ax[1][0].imshow( | np.rot90(model_labeled_image[x, :, :, :]) | numpy.rot90 |
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from matplotlib.ticker import PercentFormatter
data = pd.read_csv('C:\\Users\\stewue\\OneDrive - Wuersten\\Uni\\19_HS\\Masterarbeit\\Repo\\Evaluation\\RQ1_Results\\aggregated\\executiontime.csv')

# Total runtime per benchmark in minutes.
totalTime = data['executionTime'] * data['parameterizationCombinations'] / 60

# Normalized histogram (weights sum to 1) over 0-30 minutes.
# Renamed from `all`, which shadowed the builtin of the same name.
counts, base = np.histogram(totalTime, bins=1000, range=[0, 30],
                            weights=np.ones(len(totalTime)) / len(totalTime))
cumulative = np.cumsum(counts)

fig = plt.figure()
total = totalTime.shape[0]

# absolute
ax1 = fig.add_subplot()
ax1.plot(base[:-1], cumulative * total)
ax1.set_ylabel('# benchmarks')

# relative
ax2 = ax1.twinx()
plt.gca().yaxis.set_major_formatter(PercentFormatter(1, 0))
ax2.plot(base[:-1], cumulative)
ax2.set_ylabel('# benchmarks [cumulative %]')

ax1.set_xlabel('execution time [min]')
plt.yticks(np.arange(0, 0.91, 0.1))
plt.tight_layout()

#plt.show()
#plt.savefig('C:\\Users\\stewue\\OneDrive - Wuersten\\Uni\\19_HS\\Masterarbeit\\Repo\\Evaluation\\RQ1_Results\\images\\execution_time_per_benchmark.pdf')
print("max: " + str( | np.max(totalTime) | numpy.max |
# -*- coding: utf-8 -*-
"""
human VS AI models
Input your move in the format: 2,3
@author: <NAME>
"""
from __future__ import print_function
import pickle
from policy_value_net_numpy import PolicyValueNetNumpy
# from policy_value_net import PolicyValueNet # Theano and Lasagne
# from policy_value_net_pytorch import PolicyValueNet # Pytorch
# from policy_value_net_tensorflow import PolicyValueNet # Tensorflow
# from policy_value_net_keras import PolicyValueNet # Keras
import random
import numpy as np
from collections import defaultdict, deque
from game import Board, Game
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
# from policy_value_net import PolicyValueNet # Theano and Lasagne
from policy_value_net_pytorch import PolicyValueNet # Pytorch
from policy_value_net_pytorch import PolicyValueNet # Pytorch
# from policy_value_net_tensorflow import PolicyValueNet # Tensorflow
# from policy_value_net_keras import PolicyValueNet # Keras
class Human(object):
    """
    Human player: reads moves from stdin in the format "row,col".
    """

    def __init__(self):
        self.player = None

    def set_player_ind(self, p):
        """Assign the player index used by the game engine."""
        self.player = p

    def get_action(self, board):
        """Prompt until a valid move is entered; return the board move index."""
        try:
            location = input("Your move: ")
            if isinstance(location, str):  # for python3
                location = [int(n, 10) for n in location.split(",")]
            move = board.location_to_move(location)
        except Exception:
            # Any parse/conversion failure is treated as an invalid move
            # (the unused `as e` binding was removed).
            move = -1
        if move == -1 or move not in board.availables:
            print("invalid move")
            move = self.get_action(board)
        return move

    def __str__(self):
        return "Human {}".format(self.player)
class TrainPipeline():
def __init__(self, init_model=None):
    """Set up the board, game, training hyper-parameters and policy network.

    Parameters
    ----------
    init_model : str, optional
        Path to a saved model file used to warm-start the policy-value net.
    """
    # params of the board and the game
    self.board_width = 6
    self.board_height = 6
    self.n_in_row = 4
    self.board = Board(width=self.board_width,
                       height=self.board_height,
                       n_in_row=self.n_in_row)
    self.game = Game(self.board)
    # training params
    self.learn_rate = 2e-3
    self.lr_multiplier = 1.0  # adaptively adjust the learning rate based on KL
    self.temp = 1.0  # the temperature param
    self.n_playout = 400  # num of simulations for each move
    self.c_puct = 5
    self.buffer_size = 10000
    self.batch_size = 512  # mini-batch size for training
    self.data_buffer = deque(maxlen=self.buffer_size)
    self.play_batch_size = 1
    self.epochs = 5  # num of train_steps for each update
    self.kl_targ = 0.02
    self.check_freq = 50
    self.game_batch_num = 1500
    self.best_win_ratio = 0.0
    # num of simulations used for the pure mcts, which is used as
    # the opponent to evaluate the trained policy
    self.pure_mcts_playout_num = 1000
    if init_model:
        # start training from an initial policy-value net
        self.policy_value_net = PolicyValueNet(self.board_width,
                                               self.board_height,
                                               model_file=init_model)
    else:
        # start training from a new policy-value net
        self.policy_value_net = PolicyValueNet(self.board_width,
                                               self.board_height)
    # Self-play agent driven by the policy-value network.
    self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
                                  c_puct=self.c_puct,
                                  n_playout=self.n_playout,
                                  is_selfplay=1)
def get_equi_data(self, play_data):
"""augment the data set by rotation and flipping
play_data: [(state, mcts_prob, winner_z), ..., ...]
"""
extend_data = []
for state, mcts_porb, winner in play_data:
for i in [1, 2, 3, 4]:
# rotate counterclockwise
equi_state = np.array([np.rot90(s, i) for s in state])
equi_mcts_prob = np.rot90(np.flipud(
mcts_porb.reshape(self.board_height, self.board_width)), i)
extend_data.append((equi_state,
np.flipud(equi_mcts_prob).flatten(),
winner))
# flip horizontally
equi_state = np.array([np.fliplr(s) for s in equi_state])
equi_mcts_prob = | np.fliplr(equi_mcts_prob) | numpy.fliplr |
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from numpy import linalg as LA
from matplotlib.animation import FuncAnimation
from matplotlib.ticker import FormatStrFormatter
from mpl_toolkits.mplot3d import Axes3D
from operator import itemgetter, attrgetter, truediv
import statistics as stat
from scipy import signal
from scipy.optimize import minimize
import math
import matplotlib.image as mpimg
#from scipy import signal
import librosa
import librosa.display
import wave
import sys
import soundfile as sf
import os
import pyaudio
import threading
import pickle
# Silence numpy divide-by-zero / invalid-value warnings (spectral ratios
# routinely divide by zero-magnitude bins).
np.seterr(divide='ignore', invalid='ignore')
import params

# Analysis parameters centralised in params.py
WINDOW = params.WINDOW                                  # CQT window function
BINS_PER_OCTAVE = params.BINS_PER_OCTAVE                # CQT resolution (main transform)
BINS_PER_OCTAVE_ONSETS = params.BINS_PER_OCTAVE_ONSETS  # coarser CQT for onset detection
FILTER_SCALE = params.FILTER_SCALE
STEP = 512                                              # hop length in samples
α = params.α                                            # onset threshold: constant offset
ω = params.ω                                            # onset threshold: median weight
H = params.H                                            # median window length (frames)
T = params.T                                            # minimum spacing between onsets (s)
T_att = params.T_att                                    # attack duration (s)
cmap = params.cmap                                      # colormap for plots
ϵ = sys.float_info.epsilon                              # numerical guard value
class AudioFile:
    """Threaded WAV-file playback through a PyAudio output stream."""

    chunk = 1024  # frames read/written per iteration

    def __init__(self, file):
        """ Init audio stream """
        self.file = file
        self.wf = wave.open(file, 'rb')
        self.p = pyaudio.PyAudio()
        self.stream = self.p.open(
            format = self.p.get_format_from_width(self.wf.getsampwidth()),
            channels = self.wf.getnchannels(),
            rate = self.wf.getframerate(),
            output = True
        )

    def __truePlay(self):
        # Stream the file chunk by chunk until end-of-file.
        # BUG FIX: wave.readframes() returns bytes and yields b'' at EOF;
        # the old test `data != ''` compared bytes to str, which is always
        # True in Python 3 and caused an infinite loop at end of file.
        data = self.wf.readframes(self.chunk)
        while data:
            self.stream.write(data)
            data = self.wf.readframes(self.chunk)

    def play(self):
        """ Play entire file """
        # Playback runs on a background thread so the caller is not blocked.
        x = threading.Thread(target=self.__truePlay, args=())
        x.start()

    def close(self):
        """ Graceful shutdown """
        self.stream.close()
        self.p.terminate()
class SignalSepare:
""" Prend en entrée en signal et le signal des pistes audio séparées. """
def __init__(self, signal, sr, pistes, Notemin = 'D3', Notemax = 'D9', onset_frames = None, delOnsets = None, addOnsets = None, score = None, instrument = ''):
    """Initialise the separator with a mixed signal and its separated tracks.

    NOTE(review): parameter semantics inferred from usage in this class —
    confirm against the calling code.

    Parameters
    ----------
    signal : array — the mixed audio signal
    sr : int — sample rate
    pistes : list — audio signals of the separated tracks ("pistes")
    Notemin, Notemax : str — note names bounding the CQT analysis range
    onset_frames : list, optional — precomputed onset frames (detection is
        skipped when non-empty)
    delOnsets : list or str, optional — detected onsets to remove manually
    addOnsets : list, optional — onset times (s) to add manually
    score : list, optional — score data
    instrument : str, optional — instrument name
    """
    # BUG FIX: the list parameters previously used mutable default
    # arguments ([]), which are shared across calls; use the None idiom so
    # each instance gets a fresh list.
    self.y = signal
    self.pistes = pistes
    self.sr = sr
    self.n_pistes = len(pistes)
    self.Notemin = Notemin
    self.Notemax = Notemax
    self.delOnsets = [] if delOnsets is None else delOnsets
    self.addOnsets = [] if addOnsets is None else addOnsets
    self.score = [] if score is None else score
    self.instrument = instrument

    # --- Analysis geometry (filled in by DetectionOnsets) ---
    self.n_bins_ONSETS = 0
    self.n_bins = 0   # (duplicate `self.n_bins = 0` assignment removed)
    self.N = 0
    self.fmin = 0
    self.fmax = 0
    self.n_att = 0
    self.n_frames = 0
    self.N_sample = []
    self.Dev = []
    self.Seuil = []
    self.times = []
    self.Onset_given = True
    self.onset_times = []
    self.onset_times_graph = []
    self.onset_frames = [] if onset_frames is None else onset_frames
    self.Percu = []

    # --- Chromagram / CQT buffers ---
    self.Chrom_ONSETS = []
    self.ChromDB_ONSETS = []
    self.ChromSync_ONSETS = []
    self.ChromPistesSync_ONSETS = []
    self.ChromDB_reloc = []
    self.Chrom = []
    self.chromSync = []
    self.chromSyncDB = []
    self.chromPistesSync = []
    self.chromSyncSimpl = []
    self.chromPistesSyncSimpl = []
    self.ChromNoHpss = []
    self.energy = []
    self.energyPistes = []
    self.activation = []
    self.n_notes = []

    # --- Per-descriptor results ---
    self.chrom_concordance = []
    self.concordance = []
    self.chrom_concordanceTot = []
    self.concordanceTot = []
    self.chrom_concordance3 = []
    self.concordance3 = []
    self.tension = []
    self.roughness = []
    self.chrom_harmonicity = []
    self.liste_partials = []
    self.tensionSignal = []
    self.chrom_roughness = []
    self.roughnessSignal = []
    self.chrom_harmonicChange = []
    self.harmonicChange = []
    self.chrom_crossConcordance = []
    self.crossConcordance = []
    self.chrom_crossConcordanceTot = []
    self.crossConcordanceTot = []
    self.chrom_diffConcordance = []
    self.diffRoughness = []
    self.chrom_diffRoughness = []
    self.diffConcordance = []
    self.harmonicity = []
    self.virtualPitch = []

    # --- Context-dependent descriptors ---
    self.context = []
    self.contextSimpl = []
    self.energyContext = []
    self.chrom_harmonicNovelty = []
    self.harmonicNovelty = []
    self.harmonicityContext = []
    self.virtualPitchContext = []
    self.roughnessContext = []
    self.chrom_roughnessContext = []
    self.diffConcordanceContext = []
    self.chrom_diffConcordanceContext = []
    self.diffRoughnessContext = []
    self.chrom_diffRoughnessContext = []
def DetectionOnsets(self):
self.fmin = librosa.note_to_hz(self.Notemin)
self.fmax = librosa.note_to_hz(self.Notemax)
#Nmin = int((sr/(fmax*(2**(1/BINS_PER_OCTAVE)-1))))
#Nmax = int((sr/(fmin*(2**(1/BINS_PER_OCTAVE)-1))))
self.n_bins_ONSETS = int((librosa.note_to_midi(self.Notemax) - librosa.note_to_midi(self.Notemin))*BINS_PER_OCTAVE_ONSETS/12)
self.Chrom_ONSETS = np.abs(librosa.cqt(y=self.y, sr=self.sr, hop_length = STEP, fmin= self.fmin, bins_per_octave=BINS_PER_OCTAVE_ONSETS, n_bins=self.n_bins_ONSETS, window=WINDOW))
self.ChromDB_ONSETS = librosa.amplitude_to_db(self.Chrom_ONSETS, ref=np.max)
self.N = len(self.ChromDB_ONSETS[0])
self.times = librosa.frames_to_time(np.arange(self.N), sr=self.sr, hop_length=STEP)
# CALCUL DES ONSETS (pour onset précalculé, le rentrer dans self.onset_frames à l'initialisation)
if len(self.onset_frames) == 0:
self.Onset_given = False
Diff = np.zeros((self.n_bins_ONSETS,self.N))
self.Dev = np.zeros(self.N)
for j in range(1,self.N):
for i in range(self.n_bins_ONSETS):
Diff[i,j] = np.abs(self.ChromDB_ONSETS[i,j]-self.ChromDB_ONSETS[i,j-1])
self.Dev[j] = sum(Diff[:,j])
# FONCTION DE SEUIL
# Ajout de zéros en queue et en tête
l = []
Onsets = []
for k in range(int(H/2)):
l.append(0)
for val in self.Dev:
l.append(val)
for k in range(int(H/2)):
l.append(0)
#Calcul de la médiane
for i in range(self.N):
self.Seuil.append(α + ω*stat.median(l[i:i+H]))
if self.Dev[i] > self.Seuil[i]:
Onsets.append(i)
# FONCTION DE TRI SUR LES ONSETS
# Onsets espacés d'au moins T
i=0
while i<(len(Onsets)-1):
while (i<(len(Onsets)-1)) and (self.times[Onsets[i+1]]< self.times[Onsets[i]]+T):
if (self.Dev[Onsets[i+1]]-self.Seuil[Onsets[i+1]]) < (self.Dev[Onsets[i]]-self.Seuil[Onsets[i]]): del Onsets[i+1]
#if (Dev[Onsets[i+1]]) < (Dev[Onsets[i]]): del Onsets[i+1]
else: del Onsets[i]
i=i+1
# Suppression manuelle des onsets en trop (cela nécessite d'avoir affiché les onsets jusqu'ici détectés)
if isinstance(self.delOnsets, str): Onsets = []
else:
self.delOnsets.sort(reverse = True)
for o in self.delOnsets:
Onsets.pop(o-1)
#Ajout manuel des onsets
for t in self.addOnsets:
Onsets.append(librosa.time_to_frames(t, sr=self.sr, hop_length=STEP))
Onsets.sort()
self.onset_frames = librosa.util.fix_frames(Onsets, x_min=0, x_max=self.ChromDB_ONSETS.shape[1]-1)
self.onset_frames = librosa.util.fix_frames(self.onset_frames, x_min=0, x_max=self.ChromDB_ONSETS.shape[1]-1)
self.onset_times = librosa.frames_to_time(self.onset_frames, sr=self.sr, hop_length = STEP)
self.n_frames = len(self.onset_frames)-1
self.n_notes = np.ones(self.n_frames)
# TRANSFORMÉE avec la précision due pour l'analyse
self.n_bins = int((librosa.note_to_midi(self.Notemax) - librosa.note_to_midi(self.Notemin))*BINS_PER_OCTAVE/12)
self.Chrom = np.abs(librosa.cqt(y=self.y, sr=self.sr, hop_length = STEP, fmin= self.fmin, bins_per_octave=BINS_PER_OCTAVE, n_bins=self.n_bins, window=WINDOW, filter_scale = FILTER_SCALE))
# self.Chrom[np.isnan(self.Chrom)] = 0
# Relocalisation
if params.spectral_reloc:
freq_analyse = [self.fmin*2**(k/BINS_PER_OCTAVE) for k in range(self.n_bins)]
N = [round(self.sr * params.FILTER_SCALE/(f*(2**(1/BINS_PER_OCTAVE)-1))) for f in freq_analyse]
self.N_sample = [round(n/STEP) for n in N]
Chrom_copy = np.copy(self.Chrom)
for k in range(self.n_bins):
for n in reversed(range(self.N)):
if n <= self.N_sample[k]: self.Chrom[k,n] = Chrom_copy[k,n]
else: self.Chrom[k,n] = Chrom_copy[k,n-int(self.N_sample[k]/2)]
# Décomposition partie harmonique / partie percussive
if params.decompo_hpss:
self.ChromNoHpss = np.copy(self.Chrom)
self.Chrom = librosa.decompose.hpss(self.Chrom, margin=params.margin)[0]
self.ChromDB = librosa.amplitude_to_db(self.Chrom, ref=np.max)
#Synchronisation sur les onsets, en enlevant le début et la fin des longues frames
self.chromSync = np.zeros((self.n_bins,self.n_frames))
self.n_att = int(librosa.time_to_frames(T_att, sr=self.sr, hop_length = STEP))
# for j in range(self.n_frames):
# if j==0:
# for i in range(self.n_bins):
# self.chromSync[i,j] = np.median(self.Chrom[i][self.onset_frames[j]:self.onset_frames[j+1]])
# else:
# for i in range(self.n_bins):
# self.chromSync[i,j] = np.median(self.Chrom[i][(self.onset_frames[j]+self.n_att):(self.onset_frames[j+1])])
Δmin = 0.1 # en secondes
for i in range(self.n_bins):
f = self.fmin*2**(i/BINS_PER_OCTAVE)
T_ret = 1.5 / (f * (2**(1.0/(12*4)) - 1))
for j in range(self.n_frames):
if j==0: self.chromSync[i,j] = np.median(self.Chrom[i][self.onset_frames[j]:self.onset_frames[j+1]])
else:
if T_ret < (self.onset_times[j+1] - self.onset_times[j+1]) - Δmin:
self.chromSync[i,j] = np.median(self.Chrom[i][(self.onset_frames[j]+int(librosa.time_to_frames(T_ret, sr=self.sr, hop_length = STEP))):(self.onset_frames[j+1])])
else:
self.chromSync[i,j] = np.median(self.Chrom[i][(self.onset_frames[j+1]-int(librosa.time_to_frames(Δmin, sr=self.sr, hop_length = STEP))):(self.onset_frames[j+1])])
self.chromSync[np.isnan(self.chromSync)] = 0
self.chromSync[:,0] = np.zeros(self.n_bins)
self.chromSync[:,-1] = np.zeros(self.n_bins)
self.chromSyncDB = librosa.amplitude_to_db(self.chromSync, ref=np.max)
#Calcul de l'énergie
for t in range(self.n_frames):
self.energy.append(LA.norm(self.chromSync[:,t])**2)
    def Clustering(self):
        """Cut the separated tracks on the detected onsets and store the
        onset-synchronised per-track spectrograms in ``self.chromPistesSync``.

        Also computes per-track energies (``self.energyPistes``), the
        activation matrix (which track plays a note in which frame) and the
        per-frame note count ``self.n_notes``.
        NOTE(review): relies on the module-level ``title`` global to select the
        note-counting strategy — verify it is set before calling.
        """
        if len(self.pistes) != 0:
            # Build one full-resolution CQT per separated track.
            ChromPistes = []
            for k, voice in enumerate(self.pistes):
                if params.decompo_hpss:
                    ChromPistes.append(np.nan_to_num(librosa.decompose.hpss(np.abs(librosa.cqt(y=voice, sr=self.sr, hop_length = STEP, fmin= self.fmin, bins_per_octave=BINS_PER_OCTAVE, n_bins=self.n_bins)), margin=params.margin)[0],False))
                else: ChromPistes.append(np.nan_to_num(np.abs(librosa.cqt(y=voice, sr=self.sr, hop_length = STEP, fmin= self.fmin, bins_per_octave=BINS_PER_OCTAVE, n_bins=self.n_bins)),False))
            # Onset-synchronise each track: median over the frame, skipping
            # n_att attack frames at both ends (except for the first frame).
            for k, voice in enumerate(self.pistes):
                self.chromPistesSync.append(np.zeros((self.n_bins,self.n_frames)))
                for j in range(self.n_frames):
                    if j==0:
                        for i in range(self.n_bins):
                            self.chromPistesSync[k][i,j] = np.median(ChromPistes[k][i][self.onset_frames[j]:self.onset_frames[j+1]])
                    else:
                        for i in range(self.n_bins):
                            self.chromPistesSync[k][i,j] = np.median(ChromPistes[k][i][(self.onset_frames[j]+self.n_att):(self.onset_frames[j+1]-self.n_att)])
            # Per-track energy per frame (squared L2 norm).
            self.energyPistes = np.zeros((self.n_pistes, self.n_frames))
            for t in range(self.n_frames):
                for k in range(self.n_pistes):
                    self.energyPistes[k,t] = np.sum(np.multiply(self.chromPistesSync[k][:,t], self.chromPistesSync[k][:,t]))
            # Activation matrix: which voices contain a note at which frame,
            # with note-less tracks zeroed out. Default: every track plays a
            # note in every frame.
            self.activation = np.ones((self.n_pistes, self.n_frames))
            if title in params.list_calcul_nombre_notes:
                # A track is inactive in a frame when its energy falls below a
                # fraction (seuil_activation) of its own peak energy.
                max_energy = np.amax(self.energyPistes, axis = 1)
                for k in range(self.n_pistes):
                    for t in range(self.n_frames):
                        if (self.energyPistes[k,t] < params.seuil_activation * max_energy[k]):
                            self.activation[k,t] = 0
                            self.chromPistesSync[k][:,t] = 0
                self.activation[:,0] = 0
                self.activation[:,self.n_frames-1] = 0
            elif title in params.list_3_voix:
                # Three-voice pieces: the last track never carries a note.
                self.activation[-1] = np.zeros(self.n_frames)
            # Note count per frame; first/last frames forced to zero.
            self.n_notes = np.sum(self.activation, axis=0)
            self.n_notes[0] = 0
            self.n_notes[-1] = 0
    def Context(self, type = params.memory_type, size = params.memory_size - 1, decr = params.memory_decr_ponderation):
        """Build the harmonic context ``self.context`` (bins x frames) — a
        running summary of the recent synchronised spectrum — plus its
        per-frame energy ``self.energyContext``.

        type: 'max' (running / windowed maximum) or 'mean' (decreasing-weight
            average). size: window length in frames, or any string for
            unbounded ("full") memory. decr: exponent of the 1/i**decr decay.
        NOTE: the defaults are read from ``params`` once, at class-definition
        time — later changes to ``params`` do not affect them.
        """
        self.context = np.zeros((self.n_bins,self.n_frames))
        self.context[:,0] = self.chromSync[:,0]
        # Memory = "full" (size given as a string): use the whole past.
        if isinstance(size,str):
            if type == 'max':
                for t in range(1,self.n_frames):
                    self.context[:,t] = np.fmax(self.context[:,t-1],self.chromSync[:,t])
            elif type == 'mean':
                # Weight vector: the most recent frame weighs the most (1/i**decr).
                weights = [(1/i**decr) for i in range(1,self.n_frames+2)]
                # Weighted average over all frames up to t.
                for t in range(1,self.n_frames):
                    self.context[:,t] = np.average(self.chromSync[:,:(t+1)], axis=1, weights=[weights[t-i] for i in range(t+1)])
        # Memory = int: sliding window of `size` past frames.
        elif isinstance(size,int):
            if type == 'max':
                # Warm-up (running max), then windowed max once t > size.
                for t in range(1,size+1):
                    self.context[:,t] = np.fmax(self.chromSync[:,t], self.context[:,t-1])
                for t in range(size+1,self.n_frames):
                    self.context[:,t] = np.amax(self.chromSync[:,(t-size):(t+1)], axis = 1)
            elif type == 'mean':
                # Weight vector over the window.
                weights = [(1/i**decr) for i in range(1,size+2)]
                # Warm-up frames use a truncated window, then the full window.
                for t in range(1,size+1):
                    self.context[:,t] = np.average(self.chromSync[:,1:(t+1)], axis=1, weights=[weights[t-i] for i in range(1,t+1)])
                for t in range(size+1,self.n_frames):
                    self.context[:,t] = np.average(self.chromSync[:,(t-size):(t+1)], axis=1, weights=[weights[size-i] for i in range(size+1)])
        # Context energy (squared L2 norm per frame).
        self.energyContext = []
        for t in range(self.n_frames):
            self.energyContext.append(LA.norm(self.context[:,t])**2)
    def SimplifySpectrum(self):
        """Peak-pick the synchronised spectra: keep only bins that are local
        maxima within a ±δ-bin neighbourhood, replacing each kept bin by the
        neighbourhood sum and zeroing everything else. Also builds
        ``self.liste_partials`` (bin indices of surviving partials per frame).

        NOTE(review): depends on the module-level ``distribution`` global; the
        first and last 10 bins are never inspected, so they stay zero in the
        *Simpl arrays (except chromPistesSyncSimpl, which starts as a copy).
        """
        self.chromSyncSimpl = np.zeros(self.chromSync.shape)
        self.contextSimpl = np.zeros(self.context.shape)
        self.chromPistesSyncSimpl= np.copy(self.chromPistesSync)
        δ = params.δ
        if distribution == 'themeAcc' or distribution == 'voix':
            for t in range(self.n_frames):
                for i in range(10, self.n_bins - 10):
                    # Per-track spectra
                    for p in range(len(self.pistes)):
                        if self.chromPistesSync[p][i,t] < np.max(self.chromPistesSync[p][i-δ:i+δ+1,t]): self.chromPistesSyncSimpl[p][i,t] = 0
                        else: self.chromPistesSyncSimpl[p][i,t] = np.sum(self.chromPistesSync[p][i-δ:i+δ+1,t])
                    # Global spectrum
                    if self.chromSync[i,t] < np.max(self.chromSync[i-δ:i+δ+1,t]): self.chromSyncSimpl[i,t] = 0
                    else: self.chromSyncSimpl[i,t] = np.sum(self.chromSync[i-δ:i+δ+1,t])
                    # Context spectrum
                    if self.context[i,t] < np.max(self.context[i-δ:i+δ+1,t]): self.contextSimpl[i,t] = 0
                    else: self.contextSimpl[i,t] = np.sum(self.context[i-δ:i+δ+1,t])
        elif distribution == 'record':
            for t in range(self.n_frames):
                for i in range(10, self.n_bins - 10):
                    # Global spectrum
                    if self.chromSync[i,t] < np.max(self.chromSync[i-δ:i+δ+1,t]): self.chromSyncSimpl[i,t] = 0
                    else: self.chromSyncSimpl[i,t] = np.sum(self.chromSync[i-δ:i+δ+1,t])
                    # Context spectrum
                    if self.context[i,t] < np.max(self.context[i-δ:i+δ+1,t]): self.contextSimpl[i,t] = 0
                    else: self.contextSimpl[i,t] = np.sum(self.context[i-δ:i+δ+1,t])
        # Partial list: bins that survived the simplification, per frame.
        self.liste_partials = []
        for t in range(self.n_frames):
            self.liste_partials.append([])
            for k in range(self.n_bins):
                if self.chromSyncSimpl[k,t] > 0: self.liste_partials[t].append(k)
def Concordance(self):
"""Multiplie les spectres (cqt) des différentes pistes pour créer le spectre de concordance,
et calcule la concordance en sommant sur les fréquences"""
self.chrom_concordance = np.zeros((self.n_bins,self.n_frames))
for k in range(self.n_pistes-1):
for l in range(k+1, self.n_pistes):
self.chrom_concordance += np.multiply(self.chromPistesSync[k], self.chromPistesSync[l])
# Normalisation par l'énergie et par le nombre de notes
for t in range(self.n_frames):
if self.n_notes[t] >= 2:
self.chrom_concordance[:,t] *= (self.n_notes[t]**(2*params.norm_conc)/(self.n_notes[t]*(self.n_notes[t]-1)/2)) / (self.energy[t]**params.norm_conc)
self.chrom_concordance[:,0] = 0
self.chrom_concordance[:,self.n_frames-1] = 0
self.concordance = self.chrom_concordance.sum(axis=0)
# self.concordance[0]=0
# self.concordance[self.n_frames-1]=0
    def ConcordanceTot(self):
        """Total concordance: per frame, the product of the spectra of all
        *active* tracks, normalised by the n-norm of the global spectrum raised
        to the number of notes n. Fills ``self.chrom_concordanceTot`` and the
        per-frame scalar list ``self.concordanceTot``."""
        self.chrom_concordanceTot = np.ones((self.n_bins,self.n_frames))
        for t in range(self.n_frames):
            # Multiply in only the tracks that actually play a note at t.
            for k in range(self.n_pistes):
                if self.activation[k,t]:
                    self.chrom_concordanceTot[:,t] = np.multiply(self.chrom_concordanceTot[:,t], self.chromPistesSync[k][:,t])
            if self.n_notes[t]>=1:
                # Normalise by ||chromSync||_n ** (n * norm_concTot), n = note count.
                self.chrom_concordanceTot[:,t] = np.divide((self.n_notes[t]**(self.n_notes[t]*params.norm_concTot)) * self.chrom_concordanceTot[:,t], LA.norm(self.chromSync[:,t], self.n_notes[t])**(self.n_notes[t]*params.norm_concTot))
            self.concordanceTot.append(self.chrom_concordanceTot[:,t].sum(axis=0))
        # First and last frames carry no meaningful content.
        self.chrom_concordanceTot[:,0] = 0
        self.chrom_concordanceTot[:,self.n_frames-1] = 0
        self.concordanceTot[0]=0
        self.concordanceTot[self.n_frames-1]=0
def Concordance3(self):
self.chrom_concordance3 = np.zeros((self.n_bins,self.n_frames))
for k in range(self.n_pistes-2):
for l in range(k+1, self.n_pistes-1):
for m in range(l+1, self.n_pistes):
self.chrom_concordance3 += np.multiply(np.multiply(self.chromPistesSync[k], self.chromPistesSync[l]), self.chromPistesSync[m])
# Normalisation par la norme 3 et le nombre de notes
for t in range(self.n_frames):
if self.n_notes[t] >= 3:
self.chrom_concordance3[:,t] *= (self.n_notes[t]**(3*params.norm_conc3)/(self.n_notes[t]*(self.n_notes[t]-1)*(self.n_notes[t]-2)/6)) / LA.norm(self.chromSync[:,t],ord=3)**(3*params.norm_conc3)
self.chrom_concordance3[:,0] = 0
self.chrom_concordance3[:,self.n_frames-1] = 0
self.concordance3 = self.chrom_concordance3.sum(axis=0)
# self.concordance3[0]=0
# self.concordance3[self.n_frames-1]=0
    def Roughness(self):
        """Sethares-style roughness spectrum ``self.chrom_roughness``
        (bins x frames) and its frequency sum ``self.roughness``.

        For every bin pair a dissonance weight ``rug`` is computed from the
        Sethares curve (or the 'sethares + KK' variant selected by
        params.mod_rough), then amplitude products are accumulated either
        across track pairs (params.type_rug_signal False) or on the global
        spectrum (True); the simplified spectra are used when params.rug_simpl.
        """
        self.chrom_roughness = np.zeros((self.n_bins,self.n_frames))
        self.roughness = np.zeros(self.n_frames)
        for b1 in range(self.n_bins-1):
            for b2 in range(b1+1,self.n_bins):
                # Sethares dissonance model for this bin pair.
                f1 = self.fmin*2**(b1/BINS_PER_OCTAVE)
                f2 = self.fmin*2**(b2/BINS_PER_OCTAVE)
                freq = [f1, f2]
                freq.sort()
                if params.mod_rough == 'sethares + KK':
                    s = (1/2.27)*(np.log(params.β2/params.β1)/(params.β2-params.β1))/(freq[0]**(0.477))
                elif params.mod_rough == 'sethares':
                    s = 0.24/(0.021*freq[0] + 19)
                rug = np.exp(-params.β1*s*(freq[1]-freq[0]))-np.exp(-params.β2*s*(freq[1]-freq[0]))
                if not params.type_rug_signal:
                    # Track-pair roughness: each pair's contribution is split
                    # half-and-half between the two bins involved.
                    for p1 in range(self.n_pistes-1):
                        for p2 in range(p1+1, self.n_pistes):
                            if params.rug_simpl:
                                self.chrom_roughness[b1] += (self.chromPistesSyncSimpl[p1][b1] * self.chromPistesSyncSimpl[p2][b2] + self.chromPistesSyncSimpl[p1][b2] * self.chromPistesSyncSimpl[p2][b1]) * rug/2
                                self.chrom_roughness[b2] += (self.chromPistesSyncSimpl[p1][b1] * self.chromPistesSyncSimpl[p2][b2] + self.chromPistesSyncSimpl[p1][b2] * self.chromPistesSyncSimpl[p2][b1]) * rug/2
                            else:
                                self.chrom_roughness[b1] += (self.chromPistesSync[p1][b1] * self.chromPistesSync[p2][b2] + self.chromPistesSync[p1][b2] * self.chromPistesSync[p2][b1]) * rug/2
                                self.chrom_roughness[b2] += (self.chromPistesSync[p1][b1] * self.chromPistesSync[p2][b2] + self.chromPistesSync[p1][b2] * self.chromPistesSync[p2][b1]) * rug/2
                else:
                    # Signal-level roughness on the global spectrum.
                    if params.rug_simpl:
                        self.chrom_roughness[b1] += (self.chromSyncSimpl[b1] * self.chromSyncSimpl[b2]) * rug/2
                        self.chrom_roughness[b2] += (self.chromSyncSimpl[b1] * self.chromSyncSimpl[b2]) * rug/2
                    else:
                        self.chrom_roughness[b1] += (self.chromSync[b1] * self.chromSync[b2]) * rug/2
                        self.chrom_roughness[b2] += (self.chromSync[b1] * self.chromSync[b2]) * rug/2
        # Normalise by energy and (track mode only) by the number of note pairs.
        for t in range(self.n_frames):
            if not params.type_rug_signal:
                if self.n_notes[t] >= 2:
                    self.chrom_roughness[:,t] *= (self.n_notes[t]**(2*params.norm_rug) / (self.n_notes[t]*(self.n_notes[t]-1)/2.0)) / (self.energy[t]**params.norm_rug)
            else:
                self.chrom_roughness[:,t] /= self.energy[t]**params.norm_rug
        self.chrom_roughness[:,0] = 0
        self.roughness = self.chrom_roughness.sum(axis=0)
        self.roughness[0]=0
        self.roughness[self.n_frames-1]=0
def RoughnessSignal(self):
self.roughnessSignal = np.zeros(self.n_frames)
for b1 in range(self.n_bins-1):
for b2 in range(b1+1,self.n_bins):
# Modèle de Sethares
f1 = self.fmin*2**(b1/BINS_PER_OCTAVE)
f2 = self.fmin*2**(b2/BINS_PER_OCTAVE)
freq = [f1, f2]
freq.sort()
s = 0.44*(np.log(params.β2/params.β1)/(params.β2-params.β1))*(freq[1]-freq[0])/(freq[0]**(0.477))
rug = np.exp(-params.β1*s)-np.exp(-params.β2*s)
self.roughnessSignal = self.roughnessSignal + (self.chromSync[b1,:] * self.chromSync[b2,:]) * rug
if params.norm_rug: self.roughnessSignal = np.divide(self.roughnessSignal, np.power(self.energy,1.0))
self.roughnessSignal[0]=0
self.roughnessSignal[self.n_frames-1]=0
    def Tension(self):
        """Tension per frame: for every triplet of surviving partials, weight
        the product of their (simplified) amplitudes by a Gaussian of the
        difference between the two stacked intervals — equal stacked intervals
        give maximal tension. Requires SimplifySpectrum() to have filled
        ``self.liste_partials`` and ``self.chromSyncSimpl``."""
        self.tension = np.zeros(self.n_frames)
        for t in range(self.n_frames):
            self.liste_partials[t].sort()
            long = len(self.liste_partials[t])
            for i1 in range(long-2) :
                for i2 in range(i1+1,long-1):
                    for i3 in range(i2+1,long):
                        # Stacked intervals (in bins) between consecutive partials.
                        int1, int2 = self.liste_partials[t][i2] - self.liste_partials[t][i1], self.liste_partials[t][i3] - self.liste_partials[t][i2]
                        tens = np.exp(-((int2-int1) / (0.6*BINS_PER_OCTAVE/12.0))**2)
                        self.tension[t] += self.chromSyncSimpl[self.liste_partials[t][i1],t] * self.chromSyncSimpl[self.liste_partials[t][i2],t] * self.chromSyncSimpl[self.liste_partials[t][i3],t] * tens
        # Normalise by energy**(3/2), scaled by params.norm_tension.
        for t in range(self.n_frames):
            self.tension[t] /= self.energy[t]**(params.norm_tension*3/2.0)
        self.tension[0]=0
        self.tension[self.n_frames-1]=0
def TensionSignal(self):
# Calcul des spectres des pistes en 1/4 de tons pour avoir un calcul de la tensionSignal en temps raisonnable
self.ChromSync_ONSETS = np.zeros((self.n_bins_ONSETS,self.n_frames))
for j in range(self.n_frames):
for i in range(self.n_bins_ONSETS):
self.ChromSync_ONSETS[i,j] = np.median(self.Chrom_ONSETS[i][(self.onset_frames[j]+self.n_att):(self.onset_frames[j+1]-self.n_att)])
#Calcul tensionSignal
self.tensionSignal = np.zeros(self.n_frames)
for b1 in range(self.n_bins_ONSETS-2):
for b2 in range(b1+1, self.n_bins_ONSETS-1):
for b3 in range(b2+2, self.n_bins_ONSETS):
int = [abs(b3-b1), abs(b2-b1), abs(b3-b2)]
int.sort()
monInt = int[1]-int[0]
tens = np.exp(-((int[1]-int[0])* BINS_PER_OCTAVE/(12*params.δ))**2)
self.tensionSignal = self.tensionSignal + (self.ChromSync_ONSETS[b1,:] * self.ChromSync_ONSETS[b2,:] * self.ChromSync_ONSETS[b3,:]) * tens
self.tensionSignal = np.divide(self.tensionSignal, np.power(self.energy, 3/2))
self.tensionSignal[0]=0
self.tensionSignal[self.n_frames-1]=0
def CrossConcordance(self):
if len(self.concordance) == 0: self.Concordance()
self.chrom_crossConcordance = np.zeros((self.n_bins,self.n_frames-1))
for t in range(self.n_frames-1):
self.chrom_crossConcordance[:,t] = np.multiply(self.chrom_concordance[:,t],self.chrom_concordance[:,t+1])
if params.norm_crossConc:
if self.concordance[t]*self.concordance[t+1]!=0:
self.chrom_crossConcordance[:,t] = np.divide(self.chrom_crossConcordance[:,t], self.concordance[t]*self.concordance[t+1])
self.crossConcordance = self.chrom_crossConcordance.sum(axis=0)
self.crossConcordance[0]=0
self.crossConcordance[self.n_frames-2]=0
def CrossConcordanceTot(self):
if len(self.concordanceTot) == 0: self.ConcordanceTot()
self.chrom_crossConcordanceTot = np.zeros((self.n_bins,self.n_frames-1))
for t in range(self.n_frames-1):
self.chrom_crossConcordanceTot[:,t] = np.multiply(self.chrom_concordanceTot[:,t],self.chrom_concordanceTot[:,t+1])
if params.norm_crossConcTot:
if self.concordanceTot[t]*self.concordanceTot[t+1]!=0:
self.chrom_crossConcordanceTot[:,t] = np.divide(self.chrom_crossConcordanceTot[:,t], self.concordanceTot[t]*self.concordanceTot[t+1])
self.crossConcordanceTot = self.chrom_crossConcordanceTot.sum(axis=0)
self.crossConcordanceTot[0]=0
self.crossConcordanceTot[self.n_frames-2]=0
    def HarmonicChange(self):
        """Harmonic change between each frame and the current harmonic context:
        a difference spectrum (energy-normalised per params.norm_harmChange),
        made absolute when params.type_harmChange == 'absolute', then summed
        over bins into ``self.harmonicChange`` (length n_frames-1)."""
        self.chrom_harmonicChange = np.zeros((self.n_bins,self.n_frames-1))
        for t in range(self.n_frames-1):
            # Default behaviour corresponds to params.type_harmChange == 'relative'.
            if params.norm_harmChange == 'None':
                self.chrom_harmonicChange[:,t] = self.chromSync[:,t+1] - self.context[:,t]
            elif params.norm_harmChange == 'frame_by_frame':
                self.chrom_harmonicChange[:,t] = self.chromSync[:,t+1]/np.sqrt(self.energy[t+1]) - self.context[:,t]/np.sqrt(self.energyContext[t])
            elif params.norm_harmChange == 'general':
                self.chrom_harmonicChange[:,t] = (self.chromSync[:,t+1] - self.context[:,t]) / (self.energy[t+1]*self.energyContext[t])**(1.0/4)
            if params.type_harmChange == 'absolute': self.chrom_harmonicChange[:,t] = np.abs(self.chrom_harmonicChange[:,t])
        self.harmonicChange = np.sum(np.power(self.chrom_harmonicChange,1), axis=0)
        self.harmonicChange[0]=0
        self.harmonicChange[-1]=0
def HarmonicNovelty(self):
if len(self.context) == 0: self.context()
# Construction du spectre des Nouveautés harmoniques
self.chrom_harmonicNovelty = np.zeros((self.n_bins,self.n_frames))
self.chrom_harmonicNovelty[:,1] = self.chromSync[:,1]
for t in range(2,self.n_frames):
if params.norm_Novelty == 'frame_by_frame':
self.chrom_harmonicNovelty[:,t] = self.chromSync[:,t]/np.sqrt(self.energy[t]) - self.context[:,t-1]/np.sqrt(self.energyContext[t-1])
elif params.norm_Novelty == 'general':
self.chrom_harmonicNovelty[:,t] = np.divide(self.chromSync[:,t] - self.context[:,t-1], (self.energy[t]*self.energyContext[t-1])**(1/4))
elif params.norm_Novelty == 'None':
self.chrom_harmonicNovelty[:,t] = self.chromSync[:,t] - self.context[:,t-1]
for i in range(self.n_bins):
if self.chrom_harmonicNovelty[:,t][i]<0: self.chrom_harmonicNovelty[:,t][i] = 0
# Construction des Nouveautés harmoniques
self.harmonicNovelty = np.exp(self.chrom_harmonicNovelty.sum(axis=0))
if params.type_Novelty == 'dyn':
self.harmonicNovelty = self.harmonicNovelty[1:]
self.harmonicNovelty[0]=0
self.harmonicNovelty[-1]=0
    def DiffConcordance(self):
        """Concordance between consecutive frames: product of the synchronised
        spectra of frames t and t+1 — or of frame t with the theme track
        (index 0) at t+1 when params.theme_diffConc — energy-normalised and
        summed over bins into ``self.diffConcordance`` (length n_frames-1)."""
        self.chrom_diffConcordance = np.zeros((self.n_bins,self.n_frames-1))
        if not params.theme_diffConc:
            for t in range(self.n_frames-1):
                self.chrom_diffConcordance[:,t] = np.multiply(self.chromSync[:,t], self.chromSync[:,t+1])
                self.chrom_diffConcordance[:,t] /= np.sqrt(self.energy[t] * self.energy[t+1]) ** params.norm_diffConc
        else :
            for t in range(self.n_frames-1):
                # Only when the theme track is active in frame t+1.
                if self.activation[0,t+1]:
                    self.chrom_diffConcordance[:,t] = np.multiply(self.chromSync[:,t], self.chromPistesSync[0][:,t+1]) / np.sqrt(self.energy[t] * self.energyPistes[0][t+1])** params.norm_diffConc
        self.diffConcordance = self.chrom_diffConcordance.sum(axis=0)
        self.diffConcordance[0]=0
        self.diffConcordance[self.n_frames-2]=0
    def Harmonicity(self):
        """Correlate each frame's spectrum with an ideal harmonic comb of
        params.κ partials (finite-width peaks, 1/k**params.decr amplitudes) to
        obtain a harmonicity score and the best-matching virtual pitch.

        Side effects: prints diagnostics, sets the module-level global
        ``f_corr_min`` (lowest frequency of the correlation axis) and appends
        to ``self.virtualPitch``.
        """
        # Build the ideal harmonic spectrum (comb of κ partials).
        dec = BINS_PER_OCTAVE/6 # one-tone offset to leave room for the peak width
        epaiss = int(np.rint(BINS_PER_OCTAVE/(2*params.σ))) # peak half-width in bins
        print(epaiss)
        SpecHarm = np.zeros(2*int(dec) + int(np.rint(BINS_PER_OCTAVE * np.log2(params.κ))))
        for k in range(params.κ):
            pic = int(dec + np.rint(BINS_PER_OCTAVE * np.log2(k+1)))
            for i in range(-epaiss, epaiss+1):
                SpecHarm[pic + i] = 1/(k+1)**params.decr
        len_corr = self.n_bins + len(SpecHarm) - 1
        # Full cross-correlation of the (power-compressed) real spectrum with
        # the harmonic comb, energy-normalised.
        self.chrom_harmonicity = np.zeros((len_corr,self.n_frames))
        self.harmonicity = []
        for t in range(self.n_frames):
            self.chrom_harmonicity[:,t] = np.correlate(np.power(self.chromSync[:,t],params.norm_harmonicity), SpecHarm,"full") / self.energy[t]**(params.norm_harmonicity * params.norm_harm/2)
            self.harmonicity.append(np.exp(max(self.chrom_harmonicity[:,t])))
            # Virtual pitch: correlation lag with the best match.
            self.virtualPitch.append(np.argmax(self.chrom_harmonicity[:,t]))
        virtualNotes = librosa.hz_to_note([self.fmin * (2**((i-len(SpecHarm)+dec+1)/BINS_PER_OCTAVE)) for i in self.virtualPitch] , cents = False)
        global f_corr_min
        f_corr_min = self.fmin * (2**((-len(SpecHarm)+dec+1)/BINS_PER_OCTAVE))
        print(virtualNotes[1:self.n_frames-1])
        self.chrom_harmonicity[:,0] = 0
        self.chrom_harmonicity[:,self.n_frames-1] = 0
        self.harmonicity[0]=0
        self.harmonicity[self.n_frames-1]=0
    def HarmonicityContext(self):
        """Same harmonic-comb correlation as Harmonicity(), but applied to the
        harmonic context spectrum; appends to ``self.harmonicityContext`` and
        ``self.virtualPitchContext`` and prints the virtual notes."""
        # Ideal harmonic comb (see Harmonicity() for the construction).
        dec = BINS_PER_OCTAVE/6 # one-tone offset to leave room for the peak width
        epaiss = int(np.rint(BINS_PER_OCTAVE/(2*params.σ)))
        SpecHarm = np.zeros(2*int(dec) + int(np.rint(BINS_PER_OCTAVE * np.log2(params.κ))))
        for k in range(params.κ):
            pic = int(dec + np.rint(BINS_PER_OCTAVE * np.log2(k+1)))
            for i in range(-epaiss, epaiss+1):
                SpecHarm[pic + i] = 1/(k+1)**params.decr
        # Correlation with the context spectrum. NOTE: the correlation is
        # computed twice per frame (once for max, once for argmax).
        self.harmonicityContext = []
        for t in range(self.n_frames):
            self.harmonicityContext.append(max(np.correlate(np.power(self.context[:,t],params.norm_harmonicity), SpecHarm,"full")) / LA.norm(self.context[:,t], ord = params.norm_harmonicity)**(params.norm_harmonicity))
            # Virtual pitch of the context.
            self.virtualPitchContext.append(np.argmax(np.correlate(np.power(self.context[:,t],params.norm_harmonicity), SpecHarm,"full")))
        diffVirtualNotes = librosa.hz_to_note([self.fmin * (2**((i-len(SpecHarm)+dec+1)/BINS_PER_OCTAVE)) for i in self.virtualPitchContext] , cents = False)
        print(diffVirtualNotes[1:self.n_frames-1])
    def RoughnessContext(self):
        """Sethares roughness of the harmonic context spectrum, per frame;
        fills ``self.chrom_roughnessContext`` and ``self.roughnessContext``.

        NOTE(review): when params.norm_rugCtx is set, the normalisation loop
        stops at n_frames-1, leaving the LAST frame unnormalised — TODO confirm
        this is intended.
        """
        self.chrom_roughnessContext = np.zeros((self.n_bins,self.n_frames))
        for b1 in range(self.n_bins-1):
            for b2 in range(b1+1,self.n_bins):
                # Sethares dissonance weight for this bin pair.
                f1 = self.fmin*2**(b1/BINS_PER_OCTAVE)
                f2 = self.fmin*2**(b2/BINS_PER_OCTAVE)
                freq = [f1, f2]
                freq.sort()
                if params.mod_rough == 'sethares + KK':
                    s = (1/2.27)*(np.log(params.β2/params.β1)/(params.β2-params.β1))/(freq[0]**(0.477))
                elif params.mod_rough == 'sethares':
                    s = 0.24/(0.021*freq[0] + 19)
                rug = np.exp(-params.β1*s*(freq[1]-freq[0]))-np.exp(-params.β2*s*(freq[1]-freq[0]))
                # Split the pair's contribution between the two bins.
                for t in range(self.n_frames):
                    self.chrom_roughnessContext[b1,t] += (self.context[b1,t] * self.context[b2,t]) * rug / 2
                    self.chrom_roughnessContext[b2,t] += (self.context[b1,t] * self.context[b2,t]) * rug / 2
        if params.norm_rugCtx:
            for t in range(self.n_frames-1):
                self.chrom_roughnessContext[:,t] = np.divide(self.chrom_roughnessContext[:,t], self.energyContext[t])
        self.roughnessContext = self.chrom_roughnessContext.sum(axis=0)
def DiffConcordanceContext(self):
self.chrom_diffConcordanceContext = np.zeros((self.n_bins,self.n_frames-1))
if not params.theme_diffConcCtx:
for t in range(self.n_frames-1):
self.chrom_diffConcordanceContext[:,t] = np.multiply(self.context[:,t], self.chromSync[:,t+1])
if params.norm_diffConcCtx:
self.chrom_diffConcordanceContext[:,t] /= | np.sqrt(self.energyContext[t] * self.energy[t+1]) | numpy.sqrt |
# Standard imports
import os
from datetime import datetime as dt
import numpy as np
from calendar import monthrange
import calendar
import logging
# Third-party imports
import pandas as pd
# Internal imports
from generate_profile.main import LinearModel
# Log-line format used for the root logger configured in DataHandler.__init__
# when no logger is supplied by the caller.
LOG_FORMAT = '%(asctime)s: %(levelname)s: %(message)s'
class DataHandler:
def __init__(self,config_dict,logger=None):
self.config_dict = config_dict
if logger != None:
self.logger = logger
else:
self.logger = logging.getLogger()
logging.basicConfig(format=LOG_FORMAT,level='DEBUG')
self.read_files()
self.get_trans_capacity()
self.logger.info('DataHandler initiallized ..')
    def read_files(self):
        """Read every input CSV referenced by ``self.config_dict``.

        Mandatory inputs: transformer power time-series, transformer metadata
        and solar irradiance. When an 'optional_data' section is present, the
        customer-energy, weather and date tables are loaded too and a
        ``LinearModel`` profile instance is created from them.
        """
        # Transformer power time-series (one column per transformer).
        self.dt_data = pd.read_csv(os.path.join(self.config_dict['project_path'],\
             self.config_dict['dtpower_file']), parse_dates=['Date'])
        # Transformer metadata: feeder mapping and kVA capacities.
        self.dt_metadata = pd.read_csv(os.path.join(self.config_dict['project_path'],\
             self.config_dict['dt_metadata_file']))
        self.feeders = set(self.dt_metadata['Feeder Name'].tolist())
        # Solar irradiance, indexed by timestamp.
        self.solar_data = pd.read_csv(os.path.join(self.config_dict['project_path'],\
            self.config_dict['solar_irradiance_file']),parse_dates=['Date'],index_col='Date')
        self.dt_to_feeder_map = dict(zip(self.dt_metadata['Transformer Name'], self.dt_metadata['Feeder Name']))
        if 'optional_data' in self.config_dict:
            self.customer_energy = pd.read_csv(os.path.join(self.config_dict['project_path'], \
                self.config_dict['optional_data']['folder_name'], self.config_dict['optional_data']['customer_energy_file']))
            self.weather_data = pd.read_csv(os.path.join(self.config_dict['project_path'], \
                self.config_dict['optional_data']['folder_name'], self.config_dict['optional_data']['weather_file']),
                parse_dates=['Date'])
            self.weather_data = self.weather_data.set_index('Date')
            self.date_data = pd.read_csv(os.path.join(self.config_dict['project_path'], \
                self.config_dict['optional_data']['folder_name'], self.config_dict['optional_data']['date_data_file']),
                parse_dates=['Date'])
            self.date_data = self.date_data.set_index('Date')
            # Linear load-profile model built from the optional tables.
            dt_data_indexed = self.dt_data.set_index('Date')
            self.profile_instance = LinearModel(dt_data_indexed,
                                        self.customer_energy,
                                        self.weather_data,
                                        self.date_data)
        self.logger.info('All files successfully read')
def get_trans_capacity(self):
self.trans_capacity = dict(zip(self.dt_metadata['Transformer Name'],self.dt_metadata['KVA Capacity']))
def get_customernumber_bygroup(self, name, dtorfeeder):
if hasattr(self, 'customer_energy'):
customer_energy_group = self.customer_energy.groupby('Transformer Name')
if dtorfeeder == 'DT':
customer_number_by_group = {'domestic':0,'commercial':0,'industrial':0}
dt_group = customer_energy_group.get_group(name)
grouped_by_custtype = dt_group.groupby('Customer Type')
for cust_group in list(grouped_by_custtype.groups):
customer_number_by_group[cust_group] = len(grouped_by_custtype.get_group(cust_group))
return list(customer_number_by_group.values())
else:
customer_number_by_group = {'domestic':0,'commercial':0,'industrial':0}
for dist in self.dt_to_feeder_map:
if self.dt_to_feeder_map[dist] == name:
dt_group = customer_energy_group.get_group(dist)
grouped_by_custtype = dt_group.groupby('Customer Type')
for cust_group in list(grouped_by_custtype.groups):
customer_number_by_group[cust_group] += len(grouped_by_custtype.get_group(cust_group))
return list(customer_number_by_group.values())
return [1,1,1]
# def get_loaddataframe(self,dt_name,year):
# result_path = self.config_dict['linear_model_results_path']
# filename = dt_name+'-'+str(year)+'.csv'
# dt_result_dataframe = pd.read_csv(os.path.join(result_path,filename),parse_dates=[0])
# return dt_result_dataframe
def return_solar_multiplier(self,startdate,mode):
num_of_days= 366 if calendar.isleap(startdate.year) else 365
data = self.solar_data['Irradiance']
if mode=='Daily':
date_range = pd.date_range(startdate,periods=48,freq='30min')
if mode == 'Weekly':
date_range = pd.date_range(startdate,periods=48*7,freq='30min')
if mode == 'Monthly':
date_range = pd.date_range(dt(startdate.year,startdate.month,1,0,0,0),\
periods=48*monthrange(startdate.year, startdate.month)[1],freq='30min')
if mode == 'Yearly':
date_range = pd.date_range(dt(startdate.year,1,1,0,0,0),\
periods=48*num_of_days,freq='30min')
return [data[date] for date in date_range]
    def analyze_feeder(self,feeder_name, year, mode, userdate, startdate=[], enddate=[]):
        """Aggregate analyze_dt() results over every transformer of a feeder.

        Returns a tuple: (zip of (timestamp, summed power) pairs, summed
        high-resolution power list). None gaps in individual transformer
        series are skipped in the sums via filter(None, ...).
        NOTE(review): if no transformer maps to *feeder_name*, ``t_data`` is
        never bound and the final zip raises NameError — verify callers only
        pass known feeders.
        """
        feeder_energy, feeder_energy_high_res = [], []
        for dist in self.dt_to_feeder_map:
            if self.dt_to_feeder_map[dist] == feeder_name:
                dt_data, dt_data_high_res = self.analyze_dt(dist,year,mode,userdate,startdate,enddate)
                t_data,p_data = zip(*dt_data)
                if feeder_energy == []:
                    # First transformer seeds the accumulators.
                    feeder_energy = p_data
                    feeder_energy_high_res = dt_data_high_res
                else:
                    # Element-wise sum, ignoring None gaps.
                    feeder_energy=[sum(filter(None,x)) for x in zip(p_data,feeder_energy)]
                    feeder_energy_high_res = [sum(filter(None,x)) for x in \
                        zip(dt_data_high_res,feeder_energy_high_res)]
        return zip(t_data,feeder_energy), feeder_energy_high_res
def analyze_dt(self, dt_name, year, mode, userdate, startdate=[], enddate=[]):
if startdate ==[]: startdate=dt(year,1,1,0,0,0)
if enddate ==[]: enddate = dt(year,12,31,23,59,0)
self.dt_df = pd.DataFrame({
'DATE': self.dt_data['Date'].tolist(),
'Energy(kwh)': self.dt_data[dt_name].tolist()
})
# Introducing year column to slice it by year
self.dt_df['Year'] = [str(date.year) \
for date in self.dt_df['DATE'].tolist()]
# Get data for input year
self.dt_df_grouped_year = self.dt_df.groupby('Year')
if str(year) in list(self.dt_df_grouped_year.groups):
self.dt_df_year = self.dt_df_grouped_year.get_group(str(year))
# Let's find out missing time stamps
all_date_list = list(pd.date_range(startdate,enddate,freq='30min'))
available_date_list = self.dt_df_year['DATE'].tolist()
# Replace missing time-stamps with zero or None value
temp_dict = dict(zip(available_date_list, self.dt_df_year['Energy(kwh)'].tolist()))
new_dict = {date: None for date in all_date_list}
new_dict = {**new_dict,**temp_dict}
self.dt_data_by_year = pd.DataFrame({'DATE': [keys for keys in new_dict.keys()],
'Average Power (kW)': list(new_dict.values())})
self.dt_data_by_year = self.dt_data_by_year.set_index('DATE')
self.dt_power_by_year = self.dt_data_by_year['Average Power (kW)']
if mode == 'Daily':
daily_dt_data = [[date,self.dt_power_by_year[date]] for date in self.dt_power_by_year.index \
if date.year==userdate.year and date.month==userdate.month and date.day==userdate.day]
return daily_dt_data, [x[1] for x in daily_dt_data]
if mode == 'Weekly':
weekbegin = dt(userdate.year, userdate.month, userdate.day, 0,0,0)
weekly_date_list = pd.date_range(weekbegin,periods=48*7,freq='30min')
weekly_dt_list = [self.dt_power_by_year[date] for date in weekly_date_list]
weekly_dt_list_splitted = np.array_split(np.array(weekly_dt_list),7)
weekly_date_list_splitted = np.array_split( | np.array(weekly_date_list) | numpy.array |
import argparse
from common.args import Args
import torch
import sys
import pathlib
import h5py
import numpy as np
import numpy.fft as nf
import random
import torchvision
from torchvision import transforms, utils
import torch
from torch.nn import functional as F
from pytorch_msssim import ssim
import vd_spiral
import sigpy as sp
import sigpy.plot as pl
# Clear argv so the parse_args() call below sees only declared defaults —
# presumably to tolerate notebook/embedded interpreters; confirm.
sys.argv=['']
def create_arg_parser(parser=None):
    """Build (or pass through) the argparse parser for all ML parameters.

    An already-constructed parser is returned untouched; otherwise a new
    one is created with every training/IO option registered.
    """
    if parser:
        return parser
    parser = argparse.ArgumentParser(description="ML parameters")
    # (flag, add_argument keyword arguments) — one entry per CLI option
    option_table = [
        ('--num-pools', dict(type=int, default=4, help='Number of U-Net pooling layers')),
        ('--drop-prob', dict(type=float, default=0.0, help='Dropout probability')),
        ('--num-chans', dict(type=int, default=32, help='Number of U-Net channels')),
        ('--batch-size', dict(default=1, type=int, help='Mini batch size')),
        ('--num-epochs', dict(type=int, default=500, help='Number of training epochs')),
        ('--lr', dict(type=float, default=0.1, help='Learning rate')),
        ('--lr-step-size', dict(type=int, default=40,
                                help='Period of learning rate decay')),
        ('--lr-gamma', dict(type=float, default=0.1,
                            help='Multiplicative factor of learning rate decay')),
        ('--weight-decay', dict(type=float, default=0.,
                                help='Strength of weight decay regularization')),
        ('--report-interval', dict(type=int, default=100, help='Period of loss reporting')),
        ('--data-parallel', dict(default=True,
                                 help='If set, use multiple GPUs using data parallelism')),
        ('--device', dict(type=str, default='cpu',
                          help='Which device to train on. Set to "cuda" to use the GPU')),
        ('--exp-dir', dict(type=pathlib.Path, default='/mnt/mnt/5TB_slot2/Tobias/Thesis/wrapper',
                           help='Path where model and results should be saved')),
        ('--resume', dict(action='store_true', default=False,
                          help='If set, resume the training from a previous model checkpoint. '
                               '"--checkpoint" should be set with this')),
        ('--checkpoint', dict(type=str, default='/mnt/mnt/5TB_slot2/Tobias/Thesis/wrapper/model.pt',
                              help='Path to an existing checkpoint. Used along with "--resume"')),
        ('--logdir', dict(type=str, default='/mnt/mnt/5TB_slot2/Tobias/Thesis/log/wrapper_org',
                          help='Path to an existing checkpoint. Used along with "--resume"')),
        ('--seed', dict(default=42, type=int, help='Seed for random number generators')),
        ('--resolution', dict(default=128, type=int, help='Resolution of images')),
        ('--device_ids', dict(default=[0,1], help='GPUS used')),
        ('--acceleration', dict(default=4, help='Acceleration factor used in artifical undersampling')),
    ]
    for flag, kwargs in option_table:
        parser.add_argument(flag, **kwargs)
    return parser
# Module-wide hyper-parameters; argv was emptied above, so these are the defaults.
args=create_arg_parser().parse_args()
def to_tensor(data):
    """Convert a numpy array into a float64 torch tensor; complex input is
    split into a trailing (real, imag) dimension first."""
    if np.iscomplexobj(data):
        data = np.stack([data.real, data.imag], axis=-1)
    as_tensor = torch.from_numpy(data)
    return as_tensor.double()
def rsos(data, ax=1):
    """Root-sum-of-squares combination of ``data`` along axis ``ax``."""
    power = np.abs(data) ** 2
    return np.sqrt(power.sum(axis=ax))
def to_complex(data):
    """Inverse of ``to_tensor``: fold the trailing (real, imag) dimension
    of a torch tensor back into a complex numpy array."""
    arr = data.numpy()
    real, imag = arr[..., 0], arr[..., 1]
    return real + 1j * imag
def make_ift_one(data):
    """1-D inverse FFT along the first axis (no centering shifts)."""
    spectrum = nf.ifft(data, axis=0)
    return spectrum
def make_ft_one(data):
    """1-D forward FFT along the first axis (no centering shifts)."""
    spectrum = nf.fft(data, axis=0)
    return spectrum
def make_ift(data):
    """Centered inverse FFT.

    Accepts a numpy array or a torch tensor; tensors are detached up front
    so gradient-tracking inputs no longer rely on the previous blanket
    ``try/except`` (which silently swallowed unrelated errors and ended in
    an unreachable ``return -1``).

    For >2-D input the transform runs over the last two axes, otherwise
    over ``(0, -1)`` — matching the original axis selection exactly.
    """
    if hasattr(data, 'detach'):  # torch tensor -> plain ndarray
        data = data.detach().numpy()
    axes = (-2, -1) if len(data.shape) > 2 else (0, -1)
    return nf.fftshift(nf.ifftn(nf.ifftshift(data), axes=axes))
def make_ft(data):
    """Centered forward FFT, mirroring ``make_ift``.

    For consistency with ``make_ift``, torch tensors are detached before
    the numpy FFT calls (the original crashed on gradient-tracking
    tensors). Axis selection is unchanged: last two axes for >2-D input,
    ``(0, -1)`` otherwise.
    """
    if hasattr(data, 'detach'):  # keep torch handling consistent with make_ift
        data = data.detach().numpy()
    axes = (-2, -1) if len(data.shape) > 2 else (0, -1)
    return nf.fftshift(nf.fftn(nf.ifftshift(data), axes=axes))
def center_crop(data, shape):
    """
    Apply a center crop to the input real image or batch of real images.
    Args:
        data (torch.Tensor): Input with at least 2 dimensions; the crop is
            applied along the last two dimensions.
        shape (int, int): Target (height, width); each must be positive and
            no larger than the corresponding dimension of data.
    Returns:
        torch.Tensor: The center cropped image
    """
    rows, cols = shape
    assert 0 < rows <= data.shape[-2]
    assert 0 < cols <= data.shape[-1]
    top = (data.shape[-2] - rows) // 2
    left = (data.shape[-1] - cols) // 2
    if len(data.shape) > 2:
        return data[..., top:top + rows, left:left + cols]
    return data[top:top + rows, left:left + cols]
def complex_center_crop(data, shape):
    """
    Apply a center crop to the input image or batch of complex images.
    Args:
        data (torch.Tensor): Complex input with at least 3 dimensions; the
            crop is applied along dimensions -3 and -2, and the last
            dimension is expected to hold the (real, imag) pair.
        shape (int, int): Target size; must fit inside dims -3/-2 of data.
    Returns:
        torch.Tensor: The center cropped image
    """
    rows, cols = shape
    assert 0 < rows <= data.shape[-3]
    assert 0 < cols <= data.shape[-2]
    top = (data.shape[-3] - rows) // 2
    left = (data.shape[-2] - cols) // 2
    if len(data.shape) > 2:
        return data[..., top:top + rows, left:left + cols, :]
    return data[top:top + rows, left:left + cols, :]
def complex_center_crop_2d(data, shape):
    """
    Apply a center crop to a single complex image.
    Args:
        data (torch.Tensor): Complex input; the crop is applied along
            dimensions -3 and -2, keeping the trailing (real, imag) dim.
        shape (int, int): Target size; must fit inside dims -3/-2 of data.
    Returns:
        torch.Tensor: The center cropped image
    """
    rows, cols = shape
    assert 0 < rows <= data.shape[-3]
    assert 0 < cols <= data.shape[-2]
    top = (data.shape[-3] - rows) // 2
    left = (data.shape[-2] - cols) // 2
    return data[top:top + rows, left:left + cols, :]
#statistics for normalization
#masking k-space (multiple undersamplings to test)
def random_cartesian_mask(shape=[1,args.resolution,args.resolution],center_fractions=[0.04],accelerations=[4],seed=42):
    """Random Cartesian column mask with a fully sampled centre block.

    Columns outside the centre are kept with a probability chosen so the
    overall sampling rate matches the selected acceleration factor.
    Returns a float32 torch tensor broadcastable over ``shape``.
    """
    rng = np.random.RandomState()
    if len(shape) < 3:
        raise ValueError('Shape should have 3 or more dimensions')
    rng.seed(seed)
    pick = rng.randint(0, len(accelerations))
    frac = center_fractions[pick]
    accel = accelerations[pick]
    num_cols = shape[-2]
    num_low = int(round(num_cols * frac))
    # keep-probability for the non-central columns
    keep_prob = (num_cols / accel - num_low) / (num_cols - num_low)
    column_mask = rng.uniform(size=num_cols) < keep_prob
    pad = (num_cols - num_low + 1) // 2
    column_mask[pad:pad + num_low] = True
    # broadcastable shape: singleton everywhere except the column axis
    bshape = [1] * len(shape)
    bshape[-2] = num_cols
    return torch.from_numpy(column_mask.reshape(*bshape).astype(np.float32))
def equi_cartesian_mask(shape=[1,args.resolution,args.resolution],center_fractions=[0.04],accelerations=[8],seed=42):
    """Equispaced Cartesian column mask with a fully sampled centre block.

    The spacing of the equidistant samples is adjusted so the overall rate
    still matches the acceleration once the dense centre is accounted for.
    Returns a float32 torch tensor broadcastable over ``shape``.
    """
    rng = np.random.RandomState()
    if len(shape) < 3:
        raise ValueError('Shape should have 3 or more dimensions')
    rng.seed(seed)
    pick = rng.randint(0, len(accelerations))
    frac = center_fractions[pick]
    accel = accelerations[pick]
    num_cols = shape[-2]
    num_low = int(round(num_cols * frac))
    column_mask = np.zeros(num_cols, dtype=np.float32)
    pad = (num_cols - num_low + 1) // 2
    column_mask[pad:pad + num_low] = True
    # effective spacing once the fully sampled centre is accounted for
    adjusted = (accel * (num_low - num_cols)) / (num_low * accel - num_cols)
    offset = rng.randint(0, round(adjusted))
    picked = np.around(np.arange(offset, num_cols - 1, adjusted)).astype(np.uint)
    column_mask[picked] = True
    bshape = [1] * len(shape)
    bshape[-2] = num_cols
    return torch.from_numpy(column_mask.reshape(*bshape).astype(np.float32))
def plain_cartesian_mask(shape=[1,args.resolution,args.resolution],acceleration=args.acceleration):
    """Keep every ``acceleration``-th column; no fully sampled centre.

    Returns a float32 torch tensor broadcastable over ``shape``.
    """
    keep = np.array([col % acceleration == 0 for col in range(shape[-2])])
    bshape = [1] * len(shape)
    bshape[-2] = shape[-2]
    return torch.from_numpy(keep.reshape(*bshape).astype(np.float32))
def apply_mask(data, mode="mid", r=4):
    """Undersample ``data`` with a Cartesian column mask.

    ``mode`` selects the mask type: 'random', 'mid' (equispaced) or
    anything else for the plain modulo mask. Returns (masked data, mask).
    """
    shape = np.array(data.shape)
    if mode == "random":
        mask = random_cartesian_mask(shape, accelerations=[r])
    elif mode == "mid":
        mask = equi_cartesian_mask(shape, accelerations=[r])
    else:
        mask = plain_cartesian_mask(shape, acceleration=r)
    return data * mask, mask
def losses(out, tar):
    """Return (MSE, L1, SSIM) between a prediction and its target.

    The original body assigned the SSIM result to a local named ``ssim``,
    shadowing the imported ``ssim`` function and raising UnboundLocalError
    on every call; the local is renamed so the import stays visible.
    """
    mse = F.mse_loss(out, tar)
    l1 = F.l1_loss(out, tar)
    ssim_val = ssim(out, tar, data_range=1, size_average=False)
    return mse, l1, ssim_val
def check_density(traj, d=None, r0=1):
    """Count trajectory samples per radial ring of a (N, 2) k-space spiral.

    Walks the trajectory, counting consecutive samples until the sample
    radius crosses the current ring boundary ``r``; the boundary then grows
    as r' = sqrt(r0 + r**2) (equal-area rings).

    Args:
        traj: (N, 2) array of k-space coordinates.
        d: optional list to accumulate ring counts into. A fresh list is
           created when omitted — the old ``d=[]`` default was a shared
           mutable that leaked counts between calls.
        r0: initial ring boundary (and squared-radius growth increment).

    Returns:
        tuple of numpy arrays: (counts per ring, ring boundaries,
        per-sample counts).
    """
    if d is None:
        d = []
    d_p = []
    temp = 1
    r = r0
    rs = []
    for i in range(len(traj)):
        if (traj[i, 0]**2 + traj[i, 1]**2)**0.5 > r:
            d.append(temp)
            # NOTE(review): this inner loop reuses the name ``i`` and so
            # clobbers the outer index; the while-loop below then tests
            # traj[temp-2] rather than the current sample. Preserved as-is
            # to keep behaviour identical — verify intent.
            for i in range(temp - 1):
                d_p.append(temp)
            rs.append(r)
            temp = 1
            r = np.sqrt(r0 + r**2)
            while (traj[i, 0]**2 + traj[i, 1]**2)**0.5 > r:
                d.append(temp)
                r = np.sqrt(r0 + r**2)
                rs.append(r)
                temp = 2
        else:
            temp += 1
    d.append(temp)
    for i in range(temp - 1):
        d_p.append(temp)
    return np.array(d), np.array(rs), np.array(d_p)
def get_dcf(points, dcf, rs):
    """Weight a 2-D grid of k-space ``points`` by the inverse density of
    the radial ring each grid cell falls into.

    Args:
        points: 2-D array, modified in place and returned.
        dcf: per-ring density counts (e.g. from ``check_density``).
        rs: ring boundaries used to find each cell's ring index.

    Returns:
        The weighted ``points`` array.
    """
    # NOTE(review): the squared distance is compared against ``rs`` (which
    # appears to hold radii, not squared radii) — confirm this is intended.
    for i in range(len(points)):
        for j in range(len(points[i])):
            # (removed a stray debug print(j) that flooded stdout in this
            # O(rows*cols) loop)
            idx = 0
            while (i - args.resolution)**2 + (j - args.resolution)**2 > rs[idx]:
                if idx == len(rs) - 1:
                    break
                idx += 1
            points[i, j] = points[i, j] * 1 / dcf[idx]
    return points
def apply_dcf(ksp, dp):
    """Apply density-compensation weights by element-wise division."""
    compensated = ksp / dp
    return compensated
def spiral_undersampling(data,r=8):
full=data.shape[-2]*data.shape[-1]
us_factor=r
N=full//us_factor
nRounds=100;
PowCoeff=2;
m_size=160;
p_base=0;
traj=vd_spiral.makeSpiral(N,nRounds,PowCoeff,p_base,m_size);
traj= | np.swapaxes(traj,0,1) | numpy.swapaxes |
import numpy as np
class PerspCamera(object):
r"""Perspective camera in 35mm format.
Attributes:
f_mm (float): See ``f``.
im_h (float): See ``im_res``.
im_w (float): See ``im_res``.
loc (numpy.ndarray)
lookat (numpy.ndarray)
up (numpy.ndarray)
Note:
- Sensor width of the 35mm format is actually 36mm.
- This class assumes unit pixel aspect ratio (i.e., :math:`f_x = f_y`)
and no skewing between the sensor plane and optical axis.
- The active sensor size may be smaller than ``sensor_size``, depending
on ``im_res``.
- ``aov`` is a hardware property, having nothing to do with ``im_res``.
"""
def __init__(
self, f=50., im_res=(256, 256), loc=(1, 1, 1), lookat=(0, 0, 0),
up=(0, 1, 0)):
"""
Args:
f (float, optional): 35mm format-equivalent focal length in mm.
im_res (array_like, optional): Image height and width in pixels.
loc (array_like, optional): Camera location in object space.
lookat (array_like, optional): Where the camera points to in
object space, so default :math:`(0, 0, 0)` is the object center.
up (array_like, optional): Vector in object space that, when
projected, points upward in image.
"""
self.f_mm = f
self.im_h, self.im_w = im_res
self.loc = np.array(loc)
self.lookat = np.array(lookat)
self.up = np.array(up)
    @property
    def sensor_w(self):
        """float: Fixed at 36mm"""
        # Width of the 35mm-format (full-frame) sensor.
        return 36  # mm
    @property
    def sensor_h(self):
        """float: Fixed at 24mm"""
        # Height of the 35mm-format (full-frame) sensor.
        return 24  # mm
@property
def aov(self):
"""tuple: Vertical and horizontal angles of view in degrees."""
alpha_v = 2 * np.arctan(self.sensor_h / (2 * self.f_mm))
alpha_h = 2 * np.arctan(self.sensor_w / (2 * self.f_mm))
return (alpha_v / np.pi * 180, alpha_h / np.pi * 180)
    @property
    def _mm_per_pix(self):
        # Physical size of one pixel; min() keeps the active sensor area
        # inside the fixed 36x24mm frame whatever the image aspect ratio.
        return min(self.sensor_h / self.im_h, self.sensor_w / self.im_w)
    @property
    def f_pix(self):
        """float: Focal length in pixels."""
        # Convert millimetres to pixels via the per-pixel sensor size.
        return self.f_mm / self._mm_per_pix
@property
def int_mat(self):
"""numpy.ndarray: 3-by-3 intrinsics matrix."""
return np.array([
[self.f_pix, 0, self.im_w / 2],
[0, self.f_pix, self.im_h / 2],
[0, 0, 1],
])
    @property
    def ext_mat(self):
        """numpy.ndarray: 3-by-4 extrinsics matrix, i.e., rotation and
        translation that transform a point from object space to camera space.
        """
        # Two coordinate systems involved:
        # 1. Object space: "obj"
        # 2. Desired computer vision camera coordinates: "cv"
        #    - x is horizontal, pointing right (to align with pixel coordinates)
        #    - y is vertical, pointing down
        #    - right-handed: positive z is the look-at direction
        # cv axes expressed in obj space
        cvz_obj = self.lookat - self.loc
        assert np.linalg.norm(cvz_obj) > 0, "Camera location and lookat coincide"
        cvx_obj = np.cross(cvz_obj, self.up)
        cvy_obj = np.cross(cvz_obj, cvx_obj)
        # Normalize
        cvz_obj = cvz_obj / np.linalg.norm(cvz_obj)
        cvx_obj = cvx_obj / np.linalg.norm(cvx_obj)
        cvy_obj = cvy_obj / np.linalg.norm(cvy_obj)
        # Compute rotation from obj to cv: R
        # R(1, 0, 0)^T = cvx_obj gives first column of R
        # R(0, 1, 0)^T = cvy_obj gives second column of R
        # R(0, 0, 1)^T = cvz_obj gives third column of R
        # NOTE(review): stacking the axis vectors as rows and then taking .T
        # puts them in the COLUMNS of rot_obj2cv, which is the cv->obj
        # rotation; an obj->cv transform would normally use the transpose.
        # Behaviour preserved — confirm against downstream users.
        rot_obj2cv = np.vstack((cvx_obj, cvy_obj, cvz_obj)).T
        # Extrinsics: rotate after translating the origin to the camera loc
        return rot_obj2cv.dot(
            np.array([
                [1, 0, 0, -self.loc[0]],
                [0, 1, 0, -self.loc[1]],
                [0, 0, 1, -self.loc[2]],
            ])
        )
@property
def proj_mat(self):
"""numpy.ndarray: 3-by-4 projection matrix, derived from
intrinsics and extrinsics.
"""
return self.int_mat.dot(self.ext_mat)
def set_from_mitsuba(self, xml_path):
"""Sets camera according to a Mitsuba XML file.
Args:
xml_path (str): Path to the XML file.
Raises:
NotImplementedError: If focal length is not specified in mm.
"""
from xml.etree.ElementTree import parse
tree = parse(xml_path)
# Focal length
f_tag = tree.find('./sensor/string[@name="focalLength"]')
if f_tag is None:
self.f_mm = 50. # Mitsuba default
else:
f_str = f_tag.attrib['value']
if f_str[-2:] == 'mm':
self.f_mm = float(f_str[:-2])
else:
raise NotImplementedError(f_str)
# Extrinsics
cam_transform = tree.find('./sensor/transform/lookAt').attrib
self.loc = np.fromstring(cam_transform['origin'], sep=',')
self.lookat = np.fromstring(cam_transform['target'], sep=',')
self.up = np.fromstring(cam_transform['up'], sep=',')
# Resolution
self.im_h = int(tree.find('./sensor/film/integer[@name="height"]').attrib['value'])
self.im_w = int(tree.find('./sensor/film/integer[@name="width"]').attrib['value'])
def proj(self, pts, space='object'):
"""Projects 3D points to 2D.
Args:
pts (array_like): 3D point(s) of shape N-by-3 or 3-by-N, or of length 3.
space (str, optional): In which space these points are specified:
``'object'`` or ``'camera'``.
Returns:
array_like: Vertical and horizontal coordinates of the projections, following:
.. code-block:: none
+-----------> dim1
|
|
|
v dim0
"""
pts = np.array(pts)
if pts.shape == (3,):
pts = pts.reshape((3, 1))
elif pts.shape[1] == 3:
pts = pts.T
assert space in ('object', 'camera'), "Unrecognized space"
# 3 x N
n_pts = pts.shape[1]
pts_homo = np.vstack((pts, np.ones((1, n_pts))))
# 4 x N
if space == 'object':
proj_mat = self.proj_mat
else:
ext_mat = np.hstack((np.eye(3), np.zeros((3, 1))))
proj_mat = self.int_mat.dot(ext_mat)
# Project
hvs_homo = proj_mat.dot(pts_homo)
# 3 x N: dim0 is horizontal, and dim1 is vertical
hs_homo = hvs_homo[0, :]
vs_homo = hvs_homo[1, :]
ws = hvs_homo[2, :]
hs = np.divide(hs_homo, ws)
vs = np.divide(vs_homo, ws)
vhs = np.vstack((vs, hs)).T
if vhs.shape[0] == 1:
# Single point
vhs = vhs[0, :]
return vhs
def backproj(self, depth, fg_mask=None, depth_type='plane', space='object'):
"""Backprojects depth map to 3D points.
Args:
depth (numpy.ndarray): Depth map.
fg_mask (numpy.ndarray, optional): Backproject only pixels falling inside this
foreground mask. Its values should be logical.
depth_type (str, optional): Plane or ray depth.
space (str, optional): In which space the backprojected points are specified:
``'object'`` or ``'camera'``.
Returns:
numpy.ndarray: 3D points.
"""
if fg_mask is None:
fg_mask = np.ones(depth.shape, dtype=bool)
assert depth_type in ('ray', 'plane'), "Unrecognized depth type"
assert space in ('object', 'camera'), "Unrecognized space"
v_is, h_is = np.where(fg_mask)
hs = h_is + 0.5
vs = v_is + 0.5
h_c = (depth.shape[1] - 1) / 2
v_c = (depth.shape[0] - 1) / 2
zs = depth[fg_mask]
if depth_type == 'ray':
d2 = np.power(vs - v_c, 2) + np.power(hs - h_c, 2)
# Similar triangles
zs_plane = np.multiply(zs, self.f_pix / np.sqrt(self.f_pix ** 2 + d2))
zs = zs_plane
# Backproject to camera space
xs = np.multiply(zs, hs - h_c) / self.f_pix
ys = np.multiply(zs, vs - v_c) / self.f_pix
pts = np.vstack((xs, ys, zs))
if space == 'camera':
return pts.T
# Need to further transform to object space
rot_mat = self.ext_mat[:, :3] # happens first in projection
trans_vec = self.ext_mat[:, 3].reshape(-1, 1) # happens second in projection
n_pts = pts.shape[1]
pts_obj = | np.linalg.inv(rot_mat) | numpy.linalg.inv |
##### SOLVING VIBRATIONS PROBLEMS BY NUMERICAL METHODS #####
import numpy as np
import numpy.linalg as la
import Dynas as dyn
import pandas as pd
import matplotlib.pyplot as plt
# 3-DOF lumped system: banded stiffness matrix and diagonal mass matrix
# (units unspecified in this script — presumably consistent SI; confirm).
K = np.array([[ 8, -4, 0],
              [-4, 8, -4],
              [ 0, -4, 4]])
M = np.diag([4, 4, 4])
def modal_analysis(K, M):
    """Solve the undamped free-vibration eigenproblem M^-1 K phi = lambda phi.

    Args:
        K: stiffness matrix.
        M: mass matrix (must be non-singular).

    Returns:
        tuple: (fk, wk, Phi) — natural frequencies [Hz], natural angular
        frequencies [rad/s] (both in ascending order) and the mode-shape
        matrix with one mode per column, ordered to match.
    """
    # la.solve(M, K) equals inv(M) @ K without forming the explicit inverse,
    # which is both cheaper and numerically better conditioned.
    D = la.solve(M, K)
    lambdak, Phi = la.eig(D)
    # sort the eigenpairs by ascending eigenvalue
    order = lambdak.argsort()
    lambdak = lambdak[order]
    Phi = Phi[:, order]
    # natural angular frequencies and natural frequencies
    wk = np.sqrt(np.real(lambdak))
    fk = wk / (2 * np.pi)
    return fk, wk, Phi
def finite_diff(F, x0, v0, dt, M, K, C, T):
""" SOLVING DIFFERENTIAL EQUATIONS BY THE FINITE DIFFERENCE METHOD
F = matrix including in each column the load vector along time with step dt
x0 = initial position column vector
v0 = initial velocity column vector
dt = time step (uniform along duration)
M = mass matrix
K = stiffness matrix
C = damping matrix
T = total duration of the analysis (not necessarily the same duration of the load)"""
### INITIAL PARAMETERS ####
# defining the number of steps of analysis = Ns
Ns = int(T/dt)+1
# step t0 (initial acceleration)
ngl = np.shape(F)[0] # captures the number of degrees of freedom
### MODELLING THE DISPLACEMENTS ###
x_before = np.zeros((ngl,1))
# matrix that indicates the displacements, in each degree of freedom, along the time of
# duration of analysis. Each column is a time step
x = np.zeros((ngl, Ns))
x[:,0] = x0[:,0]
### SOLVING INITIAL STEP ###
# initial Force F0 is equivalent to the first column of the matrix of load vectors F along time
aux1 = np.zeros((ngl,1))
aux1[:,0] = np.copy(F[:,0])
aux2 = aux1 - np.dot(C,v0) - np.dot(K,x0)
a0 = np.dot(la.inv(M),aux2)
# step t-1 (before initial condition)
x_before = dt*dt*a0/2 - dt*v0 + x0
# step t+1 (after initial condition)
C1 = M / (dt*dt) + C / (2*dt)
C2 = K - 2*M / (dt*dt)
C3 = M / (dt*dt) - C / (2*dt)
aux3 = aux1 - np.dot(C2, x0) - | np.dot(C3, x_before) | numpy.dot |
import struct
import socket
import pickle
import json
from torch.optim import SGD, Adam, AdamW
import sys
import time
import random
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
#import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import accuracy_score, auc, f1_score, precision_score, recall_score, roc_auc_score
from sklearn.preprocessing import MinMaxScaler
import Metrics
import wfdb
import ast
import math
import os.path
import utils
import Models
#np.set_printoptions(threshold=np.inf)
# Resolve repo-relative resource paths from this file's location.
cwd = os.path.dirname(os.path.abspath(__file__))
mlb_path = os.path.join(cwd, "..","Benchmark", "output", "mlb.pkl")
scaler_path = os.path.join(cwd, "..","Benchmark", "output", "standard_scaler.pkl")
ptb_path = os.path.join(cwd, "..", "server", "../server/PTB-XL", "ptb-xl/")
import wandb
# Experiment tracking; every run of this client logs to the same project.
wandb.init(project="non-IID,clean", entity="split-learning-medical")
# Static per-client configuration (edited by hand per deployment):
client_num = 1                # id of this split-learning client
num_classes = 2               # NOTE(review): the label encoder below emits 5 classes — confirm
pretrain_this_client = 0      # 1 -> pretrain locally on a raw shard
simultrain_this_client = 0    # 1 -> mix the raw shard into this client's train set
pretrain_epochs = 50
IID = 0                       # 1 -> use the fixed 5-way IID shard split in init()
# Load the run configuration; the context manager closes the handle
# (the original left the file object open for the whole process).
with open('parameter_client.json') as f:
    data = json.load(f)
# set parameters from json file
#epoch = data["training_epochs"]
lr = data["learningrate"]
batchsize = data["batchsize"]
batch_concat = data["batch_concat"]
host = data["host"]                 # server address for the split-learning socket
port = data["port"]
max_recv = data["max_recv"]
autoencoder = data["autoencoder"]   # compress activations before sending (see train_epoch)
detailed_output = data["detailed_output"]
count_flops = data["count_flops"]
plots = data["plots"]
autoencoder_train = data["autoencoder_train"]
deactivate_train_after_num_epochs = data["deactivate_train_after_num_epochs"]
grad_encode = data["grad_encode"]   # compress gradients on the return path
train_gradAE_active = data["train_gradAE_active"]
deactivate_grad_train_after_num_epochs = data["deactivate_grad_train_after_num_epochs"]
# Mirror the effective hyper-parameters into the wandb run config.
wandb.init(config={
    "learning_rate": lr,
    #"epochs": epoch,
    "batch_size": batchsize,
    "autoencoder": autoencoder
})
wandb.config.update({"learning_rate": lr, "PC: ": 2})
def print_json():
    """Echo the configuration values loaded from parameter_client.json."""
    labels_and_values = [
        ("learningrate: ", lr),
        ("grad_encode: ", grad_encode),
        ("gradAE_train: ", train_gradAE_active),
        ("deactivate_grad_train_after_num_epochs: ", deactivate_grad_train_after_num_epochs),
        # ("Getting the metadata epoch: ", epoch),
        ("Getting the metadata host: ", host),
        ("Getting the metadata port: ", port),
        ("Getting the metadata batchsize: ", batchsize),
        ("Autoencoder: ", autoencoder),
        ("detailed_output: ", detailed_output),
        ("count_flops: ", count_flops),
        ("plots: ", plots),
        ("autoencoder_train: ", autoencoder_train),
        ("deactivate_train_after_num_epochs: ", deactivate_train_after_num_epochs),
    ]
    for label, value in labels_and_values:
        print(label, value)
# load data from json file
class PTB_XL(Dataset):
    """PTB-XL ECG dataset wrapper around pre-loaded module-level splits.

    ``stage`` selects which split this instance serves ('train', 'val',
    'test' or 'raw'); the corresponding X_*/y_* arrays must already exist
    as globals in this module when the instance is created.
    """
    def __init__(self, stage=None):
        self.stage = stage
        # Bind the requested global split onto the instance.
        if self.stage == 'train':
            global X_train
            global y_train
            self.y_train = y_train
            self.X_train = X_train
        if self.stage == 'val':
            global y_val
            global X_val
            self.y_val = y_val
            self.X_val = X_val
        if self.stage == 'test':
            global y_test
            global X_test
            self.y_test = y_test
            self.X_test = X_test
        if self.stage == 'raw':
            global y_raw
            global X_raw
            self.y_raw = y_raw
            self.X_raw = X_raw
    def __len__(self):
        """Number of samples in the selected split."""
        if self.stage == 'train':
            return len(self.y_train)
        if self.stage == 'val':
            return len(self.y_val)
        if self.stage == 'test':
            return len(self.y_test)
        if self.stage == 'raw':
            return len(self.y_raw)
    def __getitem__(self, idx):
        """Return ``(signal, label)``; the signal's first two axes are
        swapped — presumably (time, leads) -> (leads, time) for 1-D
        convolution input; confirm against the model."""
        if self.stage == 'train':
            sample = self.X_train[idx].transpose((1, 0)), self.y_train[idx]
        if self.stage == 'val':
            sample = self.X_val[idx].transpose((1, 0)), self.y_val[idx]
        if self.stage == 'test':
            sample = self.X_test[idx].transpose((1, 0)), self.y_test[idx]
        if self.stage == 'raw':
            sample = self.X_raw[idx].transpose((1, 0)), self.y_raw[idx]
        return sample
def init():
    """Build the global train/val (and optional pretrain) dataloaders.

    Side effects: sets the module globals ``train_loader``, ``val_loader``
    and, when ``pretrain_this_client`` is set, ``pretrain_loader``.
    Sharding and shuffling are seeded for reproducibility across clients.
    """
    train_dataset = PTB_XL('train')
    val_dataset = PTB_XL('val')
    if IID:
        # Carve five fixed, disjoint client shards out of the common pool
        # (the seed makes every client agree on the split).
        train_1, rest1 = torch.utils.data.random_split(train_dataset, [3853, 15414], generator=torch.Generator().manual_seed(42))
        train_2, rest2 = torch.utils.data.random_split(rest1, [3853, 11561], generator=torch.Generator().manual_seed(42))
        train_3, rest3 = torch.utils.data.random_split(rest2, [3853, 7708], generator=torch.Generator().manual_seed(42))
        train_4, train_5 = torch.utils.data.random_split(rest3, [3853, 3855], generator=torch.Generator().manual_seed(42))
        if client_num == 1: train_dataset = train_1
        if client_num == 2: train_dataset = train_2
        if client_num == 3: train_dataset = train_3
        if client_num == 4: train_dataset = train_4
        if client_num == 5: train_dataset = train_5
    if pretrain_this_client:
        raw_dataset = PTB_XL('raw')
        print("len raw dataset", len(raw_dataset))
        pretrain_dataset, no_dataset = torch.utils.data.random_split(raw_dataset, [963, 18304],
                                                                     generator=torch.Generator().manual_seed(42))
        print("pretrain_dataset length: ", len(pretrain_dataset))
        global pretrain_loader
        pretrain_loader = torch.utils.data.DataLoader(pretrain_dataset, batch_size=batchsize, shuffle=True)
    if simultrain_this_client:
        raw_dataset = PTB_XL('raw')
        print("len raw dataset", len(raw_dataset))
        pretrain_dataset, no_dataset = torch.utils.data.random_split(raw_dataset, [963, 18304],
                                                                     generator=torch.Generator().manual_seed(42))
        print("len train dataset", len(train_dataset))
        train_dataset = torch.utils.data.ConcatDataset((pretrain_dataset, train_dataset))
        print("len mixed-train dataset", len(train_dataset))
    print("train_dataset length: ", len(train_dataset))
    # BUG FIX: this line previously reported len(train_dataset) again.
    print("val_dataset length: ", len(val_dataset))
    global train_loader
    global val_loader
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batchsize, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batchsize, shuffle=True)
"""
def new_split():
global train_loader
global val_loader
train_dataset, val_dataset = torch.utils.data.random_split(training_dataset,
[size_train_dataset,
len(training_dataset) - size_train_dataset])
print("train_dataset size: ", size_train_dataset)
print("val_dataset size: ", len(training_dataset) - size_train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batchsize, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batchsize, shuffle=True)
"""
if count_flops: #Does not work on the Jetson Nano yet. The amount of FLOPs doesn't depend on the architecture. Measuring FLOPs on the PC and JetsonNano would result in the same outcome.
# The paranoid switch prevents the FLOPs count
# Solution: sudo sh -c 'echo 1 >/proc/sys/kernel/perf_event_paranoid'
# Needs to be done after every restart of the PC
from ptflops import get_model_complexity_info
from pypapi import events, papi_high as high
def str_to_number(label):
    """Encode a list of PTB-XL diagnostic superclass tags as a 5-element
    multi-hot vector (order: NORM, MI, STTC, HYP, CD). Unknown tags are
    ignored; an empty/None label yields all zeros."""
    vec = np.zeros(5)
    if not label:
        return vec
    slot = {'NORM': 0, 'MI': 1, 'STTC': 2, 'HYP': 3, 'CD': 4}
    for tag in label:
        if tag in slot:
            vec[slot[tag]] = 1
    return vec
#send/recieve system:
def send_msg(sock, getid, content):
    """
    pickle the [getid, content] payload, prefix a 4-byte big-endian length
    header and push the frame through the tcp socket; the sent size is
    added to the per-epoch traffic counter.
    :param sock: socket
    :param getid: id of the remote handler that should process the content
    :param content: content to send via tcp port
    """
    global data_send_per_epoch
    payload = pickle.dumps([getid, content])
    framed = struct.pack('>I', len(payload)) + payload
    data_send_per_epoch += sys.getsizeof(framed)
    sock.sendall(framed)
def recieve_msg(sock):
    """
    Read one framed message from the socket and unpickle it.
    :param sock: socket
    :return: the unpickled [getid, content] payload
    """
    raw = recv_msg(sock)  # receive client message from socket
    return pickle.loads(raw)
def recieve_request(sock):
    """
    Read one framed request, unpickle it and dispatch it to the handler
    selected by its getid.
    :param sock: socket
    """
    getid, content = pickle.loads(recv_msg(sock))
    handle_request(sock, getid, content)
def recv_msg(sock):
    """
    Read one length-prefixed message: the first 4 bytes carry the payload
    size in network byte order, the remainder is the payload. The received
    size is added to the per-epoch traffic counter.
    :param sock: socket
    :return: payload bytes, or None when the stream closed before the header
    """
    global data_recieved_per_epoch
    header = recvall(sock, 4)
    if not header:
        return None
    (payload_len,) = struct.unpack('>I', header)
    data_recieved_per_epoch += payload_len
    return recvall(sock, payload_len)
def recvall(sock, n):
    """
    Keep calling ``sock.recv`` until exactly ``n`` bytes are collected.
    :param sock: socket
    :param n: number of bytes to read
    :return: the n collected bytes, or None if the peer closed early
    """
    buffer = b''
    while len(buffer) < n:
        if detailed_output:
            print("Start function sock.recv")
        chunk = sock.recv(n - len(buffer))
        if not chunk:
            return None
        buffer += chunk
    return buffer
def handle_request(sock, getid, content):
    """
    Dispatch a received request to the function registered for its getid.
    :param sock: socket
    :param getid: id of the function that should handle the message
    :param content: message content
    """
    switcher = {
        0: initialize_model,
        1: train_epoch,
        2: val_stage,
        3: test_stage,
    }
    handler = switcher.get(getid)
    if handler is None:
        # The old code did switcher.get(getid, "invalid request recieved")
        # (sock, content), which *called the fallback string* and raised
        # TypeError instead of reporting the unknown id.
        print("invalid request recieved")
        return
    handler(sock, content)
def serverHandler(conn):
    # Per-connection service loop: keep handling framed requests until the
    # socket raises. NOTE(review): a closed peer makes recv_msg return None,
    # which pickle.loads rejects with an exception — presumably that is how
    # this loop terminates; confirm.
    while True:
        recieve_request(conn)
def grad_postprocessing(grad):
    """Undo the server-side scaling of a gradient batch.

    Applies ``scaler.inverse_transform`` (module-level scaler received from
    the server) row by row, then returns the result as a DoubleTensor on
    the configured device. The loop now runs over the actual batch size
    instead of a hard-coded 64.

    NOTE(review): ``grad.numpy()`` shares memory with ``grad``, so the
    in-place writes also modify the input tensor — preserved as before.
    """
    grad_new = grad.numpy()
    for a in range(grad_new.shape[0]):  # generalized from range(64)
        grad_new[a] = scaler.inverse_transform(grad[a])
    grad_new = torch.DoubleTensor(grad_new).to(device)
    return grad_new
def train_epoch(s, pretraining):
    """Run one split-learning training epoch against the server socket ``s``.

    Per batch: the client model produces cut-layer activations, optionally
    compresses them with the autoencoder, sends them to the server, receives
    the gradient of the cut-layer output back (optionally gradient-decoder
    decompressed), and finishes backpropagation locally. Batch metrics are
    accumulated and summarized (and wandb-logged unless pretraining).

    :param s: connected server socket used by send_msg / recieve_msg
    :param pretraining: truthy -> iterate ``pretrain_loader`` and skip wandb
    """
    #new_split() #new random dist between train and val
    loss_grad_total = 0
    global epoch
    epoch += 1
    flops_forward_epoch, flops_encoder_epoch, flops_backprop_epoch, flops_rest, flops_send = 0,0,0,0,0
    #Specify AE configuration
    # Train the (gradient-)autoencoders only during the first epochs, then freeze.
    train_active = 0 #default: AE is pretrained
    train_grad_active = 0
    if epoch < deactivate_train_after_num_epochs:
        if autoencoder_train:
            train_active = 1
    if epoch < deactivate_grad_train_after_num_epochs:
        if train_gradAE_active:
            train_grad_active = 1
    global data_send_per_epoch, data_recieved_per_epoch, data_send_per_epoch_total, data_recieved_per_epoch_total
    data_send_per_epoch, data_recieved_per_epoch = 0, 0
    correct_train, total_train, train_loss = 0, 0, 0
    batches_aborted, total_train_nr, total_val_nr, total_test_nr = 0, 0, 0, 0
    hamming_epoch, precision_epoch, recall_epoch, f1_epoch, auc_train = 0, 0, 0, 0, 0
    #encoder_grad_server = 0
    epoch_start_time = time.time()
    loader = pretrain_loader if pretraining else train_loader
    for b, batch in enumerate(loader):
        if count_flops:
            x = high.read_counters()
        #print("batch: ", b)
        # print("FLOPs dataloader: ", x)
        # if b % 100 == 0:
        # print("batch ", b, " / ", total_batch)
        forward_time = time.time()
        active_training_time_batch_client = 0
        start_time_batch_forward = time.time()
        # define labels and data per batch
        x_train, label_train = batch
        x_train = x_train.to(device)
        # x_train = x_train.to(device)
        label_train = label_train.double().to(device)
        # NOTE(review): partial batches are dropped — presumably 64 is the
        # fixed batch size the server-side pipeline/scaler expects; confirm.
        if len(x_train) != 64:
            break
        if count_flops:
            x = high.read_counters()
            flops_rest += x[0] # reset Flop Counter
        optimizer.zero_grad() # sets gradients to 0 - start for backprop later
        # Client-side forward pass; keep a detached copy for sending while
        # `client_output_backprop` stays attached for the local backward pass.
        client_output_backprop = client(x_train)
        client_output_train = client_output_backprop.detach().clone()
        if count_flops:
            x = high.read_counters()
            #print("FLOPs forward: ", x)
            flops_forward_epoch += x[0]
        client_output_train_without_ae_send = 0
        if autoencoder:
            if train_active:
                optimizerencode.zero_grad()
            # client_output_train_without_ae = client_output_train.clone().detach().requires_grad_(False)
            client_encoded = encode(client_output_train)
            client_output_send = client_encoded.detach().clone()
            if train_active:
                # Uncompressed copy so the server can train its decoder.
                client_output_train_without_ae_send = client_output_train.detach().clone()
        else:
            client_output_send = client_output_train.detach().clone()
        # client_output_send = encode(client_output_train)
        if count_flops:
            x = high.read_counters()
            flops_encoder_epoch += x[0]
        global encoder_grad_server
        # Package everything the server needs for its forward/backward pass.
        msg = {
            'client_output_train': client_output_send,
            'client_output_train_without_ae': client_output_train_without_ae_send,
            'label_train': label_train, # concat_labels,
            'batch_concat': batch_concat,
            'batchsize': batchsize,
            'train_active': train_active,
            'encoder_grad_server': encoder_grad_server,
            'train_grad_active': train_grad_active,
            'grad_encode': grad_encode
        }
        active_training_time_batch_client += time.time() - start_time_batch_forward
        if detailed_output:
            print("Send the message to server")
        send_msg(s, 0, msg)
        # while concat_counter_recv < concat_counter_send:
        msg = recieve_msg(s)
        # print("msg: ", msg)
        if pretraining == 0:
            wandb.log({"dropout_threshold": msg["dropout_threshold"]}, commit=False)
        # decode grad:
        client_grad_without_encode = msg["client_grad_without_encode"]
        client_grad = msg["grad_client"]
        global scaler
        scaler = msg["scaler"]
        if msg["grad_encode"]:
            # Server sent a compressed gradient: run it through the local
            # gradient decoder (and optionally train that decoder).
            if train_grad_active:
                # print("train_active")
                optimizer_grad_decoder.zero_grad()
            client_grad = Variable(client_grad, requires_grad=True)
            client_grad_decode = grad_decoder(client_grad)
            if train_grad_active:
                loss_grad_autoencoder = error_grad_autoencoder(client_grad_without_encode, client_grad_decode)
                loss_grad_total += loss_grad_autoencoder.item()
                loss_grad_autoencoder.backward()
                # Gradient w.r.t. the encoded input is sent back so the
                # server can update its gradient encoder.
                encoder_grad_server = client_grad.grad.detach().clone()#
                optimizer_grad_decoder.step()
                # print("loss_grad_autoencoder: ", loss_grad_autoencoder)
            else:
                encoder_grad_server = 0
            client_grad_decode = grad_postprocessing(client_grad_decode.detach().clone().cpu())
        else:
            if msg["client_grad_abort"] == 0:
                client_grad_decode = client_grad.detach().clone()
            #else:
            # client_grad = "abort"
            encoder_grad_server = 0
        start_time_batch_backward = time.time()
        encoder_grad = msg["encoder_grad"]
        if client_grad == "abort":
            # Server dismissed this batch: only book-keep the metrics.
            # print("client_grad: ", client_grad)
            train_loss_add, add_correct_train, add_total_train = msg["train_loss"], msg["add_correct_train"], \
                                                                 msg["add_total_train"]
            correct_train += add_correct_train
            total_train_nr += 1
            total_train += add_total_train
            train_loss += train_loss_add
            batches_aborted += 1
            output_train = msg["output_train"]
            # print("train_loss: ", train_loss/total_train_nr)
            # meter.update(output_train, label_train, train_loss/total_train_nr)
            pass
        else:
            if train_active:
                # Train the activation autoencoder with the server-provided grad.
                client_encoded.backward(encoder_grad)
                optimizerencode.step()
            # concat_tensors[concat_counter_recv].to(device)
            # concat_tensors[concat_counter_recv].backward(client_grad)
            # client_output_backprob.to(device)
            # if b % 1000 == 999:
            # print("Backprop with: ", client_grad)
            if count_flops:
                x = high.read_counters() # reset counter
                flops_rest += x[0]
                flops_send += x[0]
            # Finish backprop through the client model with the decoded grad.
            client_output_backprop.backward(client_grad_decode)
            optimizer.step()
            if count_flops:
                x = high.read_counters()
                # print("FLOPs backprob: ", x)
                flops_backprop_epoch += x[0]
            train_loss_add, add_correct_train, add_total_train = msg["train_loss"], msg["add_correct_train"], \
                                                                 msg["add_total_train"]
            correct_train += add_correct_train
            total_train_nr += 1
            total_train += add_total_train
            train_loss += train_loss_add
            output_train = msg["output_train"]
            # print("train_loss: ", train_loss/total_train_nr)
            # meter.update(output_train, label_train, train_loss/total_train_nr)
        # wandb.watch(client, log_freq=100)
        # Per-batch multi-label metrics on the server's (sigmoid) outputs.
        output = torch.round(output_train)
        # if np.sum(label.cpu().detach().numpy()[0]) > 1:
        # if np.sum(output.cpu().detach().numpy()[0] > 1):
        # print("output[0]: ", output.cpu().detach().numpy()[0])
        # print("label [0]: ", label.cpu().detach().numpy()[0])
        #if (total_train_nr % 100 == 0):
        # print("output[0]: ", output.cpu().detach().numpy()[0])
        # print("label [0]: ", label_train.cpu().detach().numpy()[0])
        #global batches_abort_rate_total
        #batches_abort_rate_total.append(batches_aborted / total_train_nr)
        active_training_time_batch_client += time.time() - start_time_batch_backward
        #active_training_time_batch_server = msg["active_trtime_batch_server"]
        #active_training_time_epoch_client += active_training_time_batch_client
        #active_training_time_epoch_server += active_training_time_batch_server
        #
        try:
            # roc_auc_score raises when a class is absent from the batch.
            roc_auc = roc_auc_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),average='micro')
            auc_train += roc_auc
        except:
            # print("auc_train_exception: ")
            # print("label: ", label)
            # print("output: ", output)
            pass
        hamming_epoch += Metrics.Accuracy(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu())
        # accuracy_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu())
        precision_epoch += precision_score(label_train.detach().clone().cpu(),
                                           torch.round(output).detach().clone().cpu(),
                                           average='micro', zero_division=0)
        # recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
        recall_epoch += recall_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),
                                     average='micro', zero_division=0)
        # f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
        f1_epoch += f1_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),
                             average='micro', zero_division=0)
    epoch_endtime = time.time() - epoch_start_time
    if pretraining:
        status_epoch_train = "epoch: {}, AUC_train: {:.4f}, Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, trainingtime for epoch: {:.6f}s, batches abortrate:{:.2f}, train_loss: {:.4f} ".format(
            epoch, auc_train / total_train_nr, hamming_epoch / total_train_nr, precision_epoch / total_train_nr,
            recall_epoch / total_train_nr,
            f1_epoch / total_train_nr, epoch_endtime, batches_aborted / total_train_nr,
            train_loss / total_train_nr)
        print("status_epoch_pretrain: ", status_epoch_train)
    else:
        # Regular training epoch: record FLOP / traffic statistics and log.
        flops_client_forward_total.append(flops_forward_epoch)
        flops_client_encoder_total.append(flops_encoder_epoch)
        flops_client_backprop_total.append(flops_backprop_epoch)
        print("data_send_per_epoch: ", data_send_per_epoch / 1000000, " MegaBytes")
        print("data_recieved_per_epoch: ", data_recieved_per_epoch / 1000000, "MegaBytes")
        data_send_per_epoch_total.append(data_send_per_epoch)
        data_recieved_per_epoch_total.append(data_recieved_per_epoch)
        status_epoch_train = "epoch: {}, AUC_train: {:.4f}, Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, trainingtime for epoch: {:.6f}s, batches abortrate:{:.2f}, train_loss: {:.4f} ".format(
            epoch, auc_train / total_train_nr, hamming_epoch / total_train_nr, precision_epoch / total_train_nr,
            recall_epoch / total_train_nr,
            f1_epoch / total_train_nr, epoch_endtime, batches_aborted / total_train_nr,
            train_loss / total_train_nr)
        print("status_epoch_train: ", status_epoch_train)
        if count_flops:
            print("MegaFLOPS_forward_epoch", flops_forward_epoch / 1000000)
            print("MegaFLOPS_encoder_epoch", flops_encoder_epoch / 1000000)
            print("MegaFLOPS_backprop_epoch", flops_backprop_epoch / 1000000)
            print("MegaFLOPS_rest", flops_rest / 1000000)
            print("MegaFLOPS_send", flops_send / 1000000)
        wandb.log({"Batches Abortrate": batches_aborted / total_train_nr,
                   "MegaFLOPS Client Encoder": flops_encoder_epoch / 1000000,
                   "MegaFLOPS Client Forward": flops_forward_epoch / 1000000,
                   "MegaFLOPS Client Backprop": flops_backprop_epoch / 1000000},
                  commit=False)
        global auc_train_log
        auc_train_log = auc_train / total_train_nr
        global accuracy_train_log
        accuracy_train_log = hamming_epoch / total_train_nr
        global batches_abort_rate_total
        batches_abort_rate_total.append(batches_aborted / total_train_nr)
    # Share the updated client weights with the server, then signal the end
    # of the epoch (message types 2 and 3 — presumably; confirm protocol).
    initial_weights = client.state_dict()
    send_msg(s, 2, initial_weights)
    msg = 0
    send_msg(s, 3, msg)
def val_stage(s, pretraining=0):
    """Run one validation pass over ``val_loader`` via the server.

    Sends the client-side activations per batch (message type 1), receives
    the server's loss/outputs, accumulates multi-label metrics, prints a
    summary line, and — outside of pretraining — logs metrics to wandb.
    Finishes by sending an end-of-stage message (type 3).

    :param s: connected server socket used by send_msg / recieve_msg
    :param pretraining: 0 -> also log validation metrics to wandb
    """
    total_val_nr, val_loss_total, correct_val, total_val = 0, 0, 0, 0
    val_losses, val_accs = [], []
    hamming_epoch, precision_epoch, recall_epoch, f1_epoch, accuracy, auc_val = 0, 0, 0, 0, 0, 0
    val_time = time.time()
    with torch.no_grad():
        for b_t, batch_t in enumerate(val_loader):
            x_val, label_val = batch_t
            x_val, label_val = x_val.to(device), label_val.double().to(device)
            optimizer.zero_grad()
            # Deterministic forward pass (dropout-style mechanism disabled).
            output_val = client(x_val, drop=False)
            client_output_val = output_val.clone().detach().requires_grad_(True)
            if autoencoder:
                client_output_val = encode(client_output_val)
            msg = {'client_output_val/test': client_output_val,
                   'label_val/test': label_val,
                   }
            if detailed_output:
                print("The msg is:", msg)
            send_msg(s, 1, msg)
            if detailed_output:
                print("294: send_msg success!")
            msg = recieve_msg(s)
            if detailed_output:
                print("296: recieve_msg success!")
            correct_val_add = msg["correct_val/test"]
            val_loss = msg["val/test_loss"]
            output_val_server = msg["output_val/test_server"]
            val_loss_total += val_loss
            correct_val += correct_val_add
            total_val_add = len(label_val)
            total_val += total_val_add
            total_val_nr += 1
            try:
                # roc_auc_score raises when a class is missing from the batch.
                roc_auc = roc_auc_score(label_val.detach().clone().cpu(), torch.round(output_val_server).detach().clone().cpu(), average='micro')
                auc_val += roc_auc
            except:
                # print("auc_train_exception: ")
                # print("label: ", label)
                # print("output: ", output)
                pass
            # Binarize the server predictions for the threshold-based metrics.
            output_val_server = torch.round(output_val_server)
            hamming_epoch += Metrics.Accuracy(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu())
            #accuracy_score(label_val.detach().clone().cpu(),
            # torch.round(output_val_server).detach().clone().cpu())
            precision_epoch += precision_score(label_val.detach().clone().cpu(),
                                               output_val_server.detach().clone().cpu(),
                                               average='micro', zero_division=0)
            # recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
            recall_epoch += recall_score(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu(),
                                         average='micro', zero_division=0)
            # f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
            f1_epoch += f1_score(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu(),
                                 average='micro', zero_division=0)
    status_epoch_val = "epoch: {},AUC_val: {:.4f} ,Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, val_loss: {:.4f}".format(
        epoch, auc_val / total_val_nr, hamming_epoch / total_val_nr, precision_epoch / total_val_nr,
        recall_epoch / total_val_nr,
        f1_epoch / total_val_nr, val_loss_total / total_val_nr)
    print("status_epoch_val: ", status_epoch_val)
    if pretraining == 0:
        wandb.log({"Loss_val": val_loss_total / total_val_nr,
                   "Accuracy_val_micro": hamming_epoch / total_val_nr,
                   "F1_val": f1_epoch / total_val_nr,
                   "AUC_val": auc_val / total_val_nr,
                   "AUC_train": auc_train_log,
                   "Accuracy_train_micro": accuracy_train_log})
    # Signal end of validation stage to the server.
    send_msg(s, 3, 0)
def test_stage(s, epoch):
    """Run the final test pass and report run-wide summary statistics.

    Mirrors :func:`val_stage` for the metric computation, then aggregates
    communication volume, batch-dismissal rate and client FLOPs over the
    whole run and stores them in the wandb config.

    NOTE(review): this iterates ``val_loader`` — a dedicated test split is
    commented out in ``initIID()``; confirm this reuse is intended.

    :param s: connected server socket used by send_msg / recieve_msg
    :param epoch: number of epochs run, used to average the totals
    """
    loss_test = 0.0
    correct_test, total_test = 0, 0
    hamming_epoch = 0
    precision_epoch = 0
    recall_epoch = 0
    f1_epoch = 0
    total_test_nr = 0
    with torch.no_grad():
        for b_t, batch_t in enumerate(val_loader):
            x_test, label_test = batch_t
            x_test, label_test = x_test.to(device), label_test.double().to(device)
            optimizer.zero_grad()
            # Deterministic forward pass (dropout-style mechanism disabled).
            output_test = client(x_test, drop=False)
            client_output_test = output_test.clone().detach().requires_grad_(True)
            if autoencoder:
                client_output_test = encode(client_output_test)
            msg = {'client_output_val/test': client_output_test,
                   'label_val/test': label_test,
                   }
            if detailed_output:
                print("The msg is:", msg)
            send_msg(s, 1, msg)
            if detailed_output:
                print("294: send_msg success!")
            msg = recieve_msg(s)
            if detailed_output:
                print("296: recieve_msg success!")
            correct_test_add = msg["correct_val/test"]
            test_loss = msg["val/test_loss"]
            output_test_server = msg["output_val/test_server"]
            loss_test += test_loss
            correct_test += correct_test_add
            total_test_add = len(label_test)
            total_test += total_test_add
            total_test_nr += 1
            # Binarize the server predictions for the threshold-based metrics.
            output_test_server = torch.round(output_test_server)
            hamming_epoch += Metrics.Accuracy(label_test.detach().clone().cpu(), output_test_server.detach().clone().cpu())
            #accuracy_score(label_test.detach().clone().cpu(),
            #torch.round(output_test_server).detach().clone().cpu())
            precision_epoch += precision_score(label_test.detach().clone().cpu(),
                                               output_test_server.detach().clone().cpu(),
                                               average='micro')
            # recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
            recall_epoch += recall_score(label_test.detach().clone().cpu(),
                                         output_test_server.detach().clone().cpu(),
                                         average='micro')
            # f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
            f1_epoch += f1_score(label_test.detach().clone().cpu(),
                                 output_test_server.detach().clone().cpu(),
                                 average='micro')
    status_test = "test: hamming_epoch: {:.4f}, precision_epoch: {:.4f}, recall_epoch: {:.4f}, f1_epoch: {:.4f}".format(
        hamming_epoch / total_test_nr, precision_epoch / total_test_nr, recall_epoch / total_test_nr,
        f1_epoch / total_test_nr)
    print("status_test: ", status_test)
    global data_send_per_epoch_total
    global data_recieved_per_epoch_total
    global batches_abort_rate_total
    # Aggregate run-wide communication / dismissal / FLOP statistics.
    data_transfer_per_epoch = 0
    average_dismissal_rate = 0
    total_flops_forward = 0
    total_flops_encoder = 0
    total_flops_backprob = 0
    for data in data_send_per_epoch_total:
        data_transfer_per_epoch += data
    for data in data_recieved_per_epoch_total:
        data_transfer_per_epoch += data
    for data in batches_abort_rate_total:
        average_dismissal_rate += data
    for flop in flops_client_forward_total:
        total_flops_forward += flop
    for flop in flops_client_encoder_total:
        total_flops_encoder += flop
    for flop in flops_client_backprop_total:
        total_flops_backprob += flop
    total_flops = total_flops_backprob + total_flops_encoder + total_flops_forward
    print("total FLOPs forward: ", total_flops_forward)
    print("total FLOPs encoder: ", total_flops_encoder)
    print("total FLOPs backprob: ", total_flops_backprob)
    print("total FLOPs client: ", total_flops)
    print("Average data transfer/epoch: ", data_transfer_per_epoch / epoch / 1000000, " MB")
    print("Average dismissal rate: ", average_dismissal_rate / epoch)
    wandb.config.update({"Average data transfer/epoch (MB): ": data_transfer_per_epoch / epoch / 1000000,
                         "Average dismissal rate: ": average_dismissal_rate / epoch,
                         "total_MegaFLOPS_forward": total_flops_forward/1000000, "total_MegaFLOPS_encoder": total_flops_encoder/1000000,
                         "total_MegaFLOPS_backprob": total_flops_backprob/1000000, "total_MegaFLOPS": total_flops/1000000})
    # Signal end of test stage to the server.
    msg = 0
    send_msg(s, 3, msg)
def initialize_model(s, msg):
    """Load initial weights received from the server into the client model.

    A payload of ``0`` means this client is the first to connect and keeps
    its randomly initialised weights; any other payload is treated as a
    state dict fetched from the server.

    :param s: connected server socket (kept for interface parity; unused)
    :param msg: ``0`` or a state dict for the global ``client`` model
    """
    if msg == 0:
        # First connected client: nothing to load.
        return
    print("msg != 0")
    client.load_state_dict(msg, strict=False)
    print("model successfully initialized")
def initIID():
    """Load PTB-XL and create the IID train/validation split.

    Fills the module-level globals ``X_train``/``y_train`` (stratification
    folds 1-9) and ``X_val``/``y_val`` (fold 10), standardized with the
    scaler stored at ``scaler_path``. The raw (pre-partition) training data
    is kept in ``X_raw``/``y_raw`` for the non-IID partitioning.
    """
    global X_train, X_val, y_val, y_train, y_test, X_test
    sampling_frequency = 100
    datafolder = ptb_path
    task = 'superdiagnostic'
    outputfolder = mlb_path

    # Load PTB-XL data
    data, raw_labels = utils.load_dataset(datafolder, sampling_frequency)

    # Preprocess label data
    labels = utils.compute_label_aggregations(raw_labels, datafolder, task)

    # Select relevant data and convert to one-hot
    data, labels, Y, _ = utils.select_data(data, labels, task, min_samples=0, outputfolder=outputfolder)
    input_shape = data[0].shape
    print(input_shape)

    # 1-9 for training
    X_train = data[labels.strat_fold < 10]
    y_train = Y[labels.strat_fold < 10]
    # 10 for validation
    X_val = data[labels.strat_fold == 10]
    y_val = Y[labels.strat_fold == 10]
    # X_test = data[labels.strat_fold == 10]
    # y_test = Y[labels.strat_fold == 10]

    num_classes = 5  # <=== number of classes in the finetuning dataset
    input_shape = [1000, 12]  # <=== shape of samples, [None, 12] in case of different lengths
    print(X_train.shape, y_train.shape, X_val.shape, y_val.shape)  # , X_test.shape, y_test.shape)

    # Standardize with the scaler fitted offline on the training folds.
    import pickle
    standard_scaler = pickle.load(open(scaler_path, "rb"))
    X_train = utils.apply_standardizer(X_train, standard_scaler)
    X_val = utils.apply_standardizer(X_val, standard_scaler)

    global X_raw, y_raw
    X_raw = X_train
    y_raw = y_train
def init_nonIID():
    """Assign this client a non-IID subset of the training data.

    Samples are bucketed by diagnostic superclass via ``label_class``
    (index 0..4 -> sttc, hyp, mi, norm, cd). Client ``client_num`` (1-based)
    then receives ``num_classes`` consecutive classes from the cyclic order
    [norm, mi, sttc, hyp, cd], starting at offset ``client_num - 1`` —
    e.g. client 3 with num_classes=3 gets sttc, hyp, cd. Overwrites the
    global ``X_train``/``y_train``.

    Note: the original hand-unrolled client/num_classes branches (which were
    corrupted/truncated for client 5, num_classes=3) are replaced by this
    table-driven equivalent; the wrap-around generalizes the same pattern.
    """
    global X_train, X_val, y_val, y_train, y_test, X_test
    # Bucket every training sample into its class list(s); a sample may
    # appear in several buckets (multi-label data).
    norm, mi, sttc, hyp, cd = [], [], [], [], []
    for a in range(len(y_train)):
        if label_class(y_train[a], 0):
            sttc.append(X_train[a])
        if label_class(y_train[a], 1):
            hyp.append(X_train[a])
        if label_class(y_train[a], 2):
            mi.append(X_train[a])
        if label_class(y_train[a], 3):
            norm.append(X_train[a])
        if label_class(y_train[a], 4):
            cd.append(X_train[a])

    # Cyclic class order shared by all clients; client k starts at offset k-1.
    class_order = ["norm", "mi", "sttc", "hyp", "cd"]
    data_by_name = {"norm": norm, "mi": mi, "sttc": sttc, "hyp": hyp, "cd": cd}
    labels_by_name = {"norm": label_norm, "mi": label_mi, "sttc": label_sttc,
                      "hyp": label_hyp, "cd": label_cd}
    chosen = [class_order[(client_num - 1 + i) % len(class_order)]
              for i in range(num_classes)]
    print("Client number: ", client_num, " Class " + ", ".join(chosen))

    if num_classes == 1:
        # Single class: keep the plain lists, matching the original behavior.
        X_train = data_by_name[chosen[0]]
        y_train = labels_by_name[chosen[0]]
    else:
        X_train = np.concatenate([data_by_name[c] for c in chosen], axis=0)
        y_train = np.concatenate([labels_by_name[c] for c in chosen], axis=0)
"""
Created on 13 April 2020
Last Update on 17 April 2020
@author: Md. <NAME>
version: 1.1
Approach: Vectorisation
"""
from __future__ import division
import numpy as np
from scipy.stats import rankdata
from collections import namedtuple
# Supporting Functions
# Data Preprocessing
def __preprocessing(x):
    """Coerce the input series to a numpy array and derive index labels.

    If ``x`` is a pandas object with a non-integer (date-like) index, the
    dates are used as string labels; otherwise a 1-based integer range is
    generated.

    :param x: array-like or pandas Series/DataFrame of observations
    :return: tuple ``(x, idx)`` — data as ``np.ndarray`` plus index labels
    """
    try:
        if x.index.dtype != 'int64':
            idx = x.index.date.astype('str')
        else:
            idx = np.asarray(range(1, len(x)+1))
    except:
        # Plain sequences have no .index attribute — fall back to a range.
        idx = np.asarray(range(1, len(x)+1))
    x = np.asarray(x)
    # NOTE(review): restored from a corrupted line; the upstream original may
    # additionally flatten (n, 1) input before returning — confirm.
    return x, idx
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from ibllib.dsp import rms
def wiggle(w, fs=1, gain=0.71, color='k', ax=None, fill=True, linewidth=0.5, t0=0, **kwargs):
"""
Matplotlib display of wiggle traces
:param w: 2D array (numpy array dimension nsamples, ntraces)
:param fs: sampling frequency
:param gain: display gain
:param color: ('k') color of traces
:param ax: (None) matplotlib axes object
:param fill: (True) fill variable area above 0
:param t0: (0) timestamp of the first sample
:return: None
"""
nech, ntr = w.shape
tscale = np.arange(nech) / fs
sf = gain / np.sqrt(rms(w.flatten()))
def insert_zeros(trace):
# Insert zero locations in data trace and tt vector based on linear fit
# Find zeros
zc_idx = np.where(np.diff(np.signbit(trace)))[0]
x1 = tscale[zc_idx]
x2 = tscale[zc_idx + 1]
y1 = trace[zc_idx]
y2 = trace[zc_idx + 1]
a = (y2 - y1) / (x2 - x1)
tt_zero = x1 - y1 / a
# split tt and trace
tt_split = np.split(tscale, zc_idx + 1)
trace_split = | np.split(trace, zc_idx + 1) | numpy.split |
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import pytest
import unittest
import shutil
import os
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.logging.WARN)
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import numpy as np
import json
import libpymo
import time
from aimet_tensorflow.quantsim import QuantizationSimModel
from aimet_tensorflow.utils.graph_saver import load_model_from_meta
from aimet_tensorflow.common.graph_eval import initialize_uninitialized_vars
from aimet_tensorflow.examples.test_models import model_with_dtype_int, keras_model_functional
from aimet_common.defs import QuantScheme
from aimet_tensorflow.quantsim import save_checkpoint, load_checkpoint
from aimet_tensorflow.utils.constants import QuantizeOpIndices
class TestQuantSim(unittest.TestCase):
def test_construction_cpu_model(self):
"""
Create QuantSim for a CPU model and check that quantizers have been added to the graph
"""
tf.compat.v1.reset_default_graph()
with tf.device('/cpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=False)
# One run through the model to check if the ops got added correctly
model_output = sess.graph.get_tensor_by_name('conv2d_1/BiasAdd_quantized:0')
model_input = sess.graph.get_tensor_by_name('conv2d_input:0')
dummy_input = np.random.randn(20, 28, 28, 3)
sess.run(model_output, feed_dict={model_input: dummy_input})
# Check that quantized ops got added for all params
quant_ops = [op for op in sess.graph.get_operations() if op.type == 'QcQuantize']
for op in quant_ops:
print(op.name)
self.assertEqual(10, len(quant_ops))
# Check that the quant ops are correctly connected in the graph
self.assertEqual('Conv2D', quant_ops[0].outputs[0].consumers()[0].type)
self.assertEqual('BiasAdd', quant_ops[1].outputs[0].consumers()[0].type)
self.assertEqual(int(libpymo.TensorQuantizerOpMode.passThrough), sess.run(quant_ops[1].inputs[1]))
# Check that op-mode is set correctly
self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
sess.run(quant_ops[0].inputs[1]))
sess.close()
sim.session.close()
del sim
@pytest.mark.cuda
def test_construction_gpu_model(self):
"""
Create QuantSim for a GPU model and check that quantizers have been added to the graph
"""
tf.compat.v1.reset_default_graph()
with tf.device('/gpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=True)
# One run through the model to check if the ops got added correctly
model_output = sess.graph.get_tensor_by_name('conv2d_1/BiasAdd_quantized:0')
model_input = sess.graph.get_tensor_by_name('conv2d_input:0')
dummy_input = np.random.randn(20, 28, 28, 3)
sess.run(model_output, feed_dict={model_input: dummy_input})
# Check that quantized ops got added for all params
quant_ops = [op for op in sess.graph.get_operations() if op.type == 'QcQuantize']
for op in quant_ops:
print(op.name)
self.assertEqual(10, len(quant_ops))
# Check that the quant ops are correctly connected in the graph
self.assertEqual('Conv2D', quant_ops[0].outputs[0].consumers()[0].type)
self.assertEqual('BiasAdd', quant_ops[1].outputs[0].consumers()[0].type)
# Check that op-mode is set correctly
self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
sess.run(quant_ops[0].inputs[1]))
sess.close()
sim.session.close()
del sim
def test_compute_encodings_cpu_model(self):
"""
Create QuantSim for a CPU model and test that activation encodings are computed
"""
tf.compat.v1.reset_default_graph()
with tf.device('/cpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=False)
# Check that op-mode is set correctly
conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
conv2d_output_quant_op = sim.session.graph.get_operation_by_name('conv2d/Relu_quantized')
self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
sim.session.run(conv2d_weight_quant_op.inputs[1]))
self.assertEqual(int(libpymo.TensorQuantizerOpMode.updateStats),
sim.session.run(conv2d_output_quant_op.inputs[1]))
def dummy_forward_pass(sess, args):
model_output = sess.graph.get_tensor_by_name('conv2d_1/Relu_quantized:0')
model_input = sess.graph.get_tensor_by_name('conv2d_input:0')
dummy_input = np.random.randn(20, 28, 28, 3)
sess.run(model_output, feed_dict={model_input: dummy_input})
sim.compute_encodings(dummy_forward_pass, None)
# Check if encodings have been calculated
deactivated_quantizers = [
'conv2d_input_quantized',
'conv2d/BiasAdd_quantized',
'conv2d_1/BiasAdd_quantized'
]
for name, quantizer in sim._activation_quantizers.items():
if name in deactivated_quantizers:
self.assertTrue(int(libpymo.TensorQuantizerOpMode.passThrough),
sim.session.run(name + '_op_mode/read:0'))
else:
self.assertTrue(quantizer.tensor_quantizer.isEncodingValid,
"quantizer: {} does not have a valid encoding".format(name))
# Check that op-mode is set correctly
# Check that quantized ops got added for all params
conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
conv2d_output_quant_op = sim.session.graph.get_operation_by_name('conv2d/Relu_quantized')
self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
sim.session.run(conv2d_weight_quant_op.inputs[1]))
self.assertEqual(int(libpymo.TensorQuantizerOpMode.quantizeDequantize),
sim.session.run(conv2d_output_quant_op.inputs[1]))
    def _save_to_keras_common_test_code(self, use_cuda):
        """Shared body for the save-to-keras tests.

        Builds a toy conv net, quantizes it, computes encodings, exports with
        ``save_to_keras()`` and verifies: the exported graph runs; every
        QcQuantize op is disconnected; each has a QcQuantizeStatic twin; and
        the static ops carry the expected bitwidth/scheme/op-mode attributes.

        :param use_cuda: forwarded to QuantizationSimModel.
            NOTE(review): the use_cuda=True branch still pins the model to
            '/cpu:0' — looks intentional (only the sim uses CUDA kernels),
            confirm.
        """
        tf.compat.v1.reset_default_graph()
        if not use_cuda:
            model = tf.keras.Sequential()
            model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
            model.add(tf.keras.layers.MaxPooling2D((2, 2)))
            model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
            model.summary()
        else:
            with tf.device('/cpu:0'):
                model = tf.keras.Sequential()
                model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
                model.add(tf.keras.layers.MaxPooling2D((2, 2)))
                model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
                model.summary()

        sess = tf.compat.v1.Session()
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=use_cuda)

        # Check that op-mode is set correctly:
        # weights one-shot quantize-dequantize, activations collecting stats.
        conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
        conv2d_output_quant_op = sim.session.graph.get_operation_by_name('conv2d/Relu_quantized')
        self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
                         sim.session.run(conv2d_weight_quant_op.inputs[1]))
        self.assertEqual(int(libpymo.TensorQuantizerOpMode.updateStats),
                         sim.session.run(conv2d_output_quant_op.inputs[1]))

        def dummy_forward_pass(sess, eval_tensor_name):
            # Evaluate the given tensor once with random input.
            model_output = sess.graph.get_tensor_by_name(eval_tensor_name)
            model_input = sess.graph.get_tensor_by_name('conv2d_input:0')
            dummy_input = np.random.randn(20, 28, 28, 3)
            sess.run(model_output, feed_dict={model_input: dummy_input})

        sim.compute_encodings(dummy_forward_pass, 'conv2d_1/Relu_quantized:0')
        mod_sess = sim.save_to_keras()

        # Check 1: The new graph is well formed. Try forward pass through the graph.
        dummy_forward_pass(mod_sess, 'conv2d_1/Relu_quantized_static:0')

        # Check 2: All the QcQuantizeOp nodes have no output - meaning are disconnected from the main graph
        op_count = 0
        for op in mod_sess.graph.get_operations():
            if op.type == "QcQuantize":
                op_count += 1
                self.assertFalse(op.outputs[0].consumers())

        # Check 3: One QcQuantizeStatic for each QcQuantize op
        static_op_count = 0
        for op in mod_sess.graph.get_operations():
            if op.type == "QcQuantizeStatic":
                static_op_count += 1
        self.assertEqual(op_count, static_op_count)

        # Check 4: Make sure the attributes are set correctly
        op = mod_sess.graph.get_operation_by_name("conv2d/Conv2D/ReadVariableOp_quantized_static")
        self.assertEqual(8, op.get_attr("bitwidth"))
        self.assertEqual(1, op.get_attr("quant_scheme"))  # TF-Enhanced
        self.assertEqual(1, op.get_attr("op_mode"))  # oneShotQuantizeDequantize

        op = mod_sess.graph.get_operation_by_name("conv2d/BiasAdd_quantized_static")
        self.assertEqual(3, op.get_attr("op_mode"))  # passThrough

        op = mod_sess.graph.get_operation_by_name("conv2d/Relu_quantized_static")
        self.assertEqual(8, op.get_attr("bitwidth"))
        self.assertEqual(1, op.get_attr("quant_scheme"))  # TF-Enhanced
        self.assertEqual(2, op.get_attr("op_mode"))  # quantizeDequantize

        sess.close()
        sim.session.close()
        del sim
def test_save_to_keras_cpu_model(self):
"""
Create sim model for a keras pipeline
"""
self._save_to_keras_common_test_code(False)
def test_save_to_keras_gpu_model(self):
"""
Create sim model for a keras pipeline
"""
self._save_to_keras_common_test_code(True)
@pytest.mark.cuda
def test_compute_encodings_gpu_model(self):
"""
Create QuantSim for a CPU model and test that activation encodings are computed
"""
tf.compat.v1.reset_default_graph()
with tf.device('/gpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=True)
# Check that op-mode is set correctly
conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
conv2d_output_quant_op = sim.session.graph.get_operation_by_name('conv2d/Relu_quantized')
self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
sim.session.run(conv2d_weight_quant_op.inputs[1]))
self.assertEqual(int(libpymo.TensorQuantizerOpMode.updateStats),
sim.session.run(conv2d_output_quant_op.inputs[1]))
def dummy_forward_pass(sess, args):
model_output = sess.graph.get_tensor_by_name('conv2d_1/Relu_quantized:0')
model_input = sess.graph.get_tensor_by_name('conv2d_input:0')
dummy_input = np.random.randn(20, 28, 28, 3)
sess.run(model_output, feed_dict={model_input: dummy_input})
sim.compute_encodings(dummy_forward_pass, None)
# Check if encodings have been calculated
deactivated_quantizers = [
'conv2d_input_quantized',
'conv2d/BiasAdd_quantized',
'conv2d_1/BiasAdd_quantized'
]
for name, quantizer in sim._activation_quantizers.items():
if name in deactivated_quantizers:
self.assertTrue(int(libpymo.TensorQuantizerOpMode.passThrough),
sim.session.run(name + '_op_mode/read:0'))
else:
self.assertTrue(quantizer.tensor_quantizer.isEncodingValid,
"quantizer: {} does not have a valid encoding".format(name))
# Check that op-mode is set correctly
# Check that quantized ops got added for all params
conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
conv2d_output_quant_op = sim.session.graph.get_operation_by_name('conv2d/Relu_quantized')
self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
sim.session.run(conv2d_weight_quant_op.inputs[1]))
self.assertEqual(int(libpymo.TensorQuantizerOpMode.quantizeDequantize),
sim.session.run(conv2d_output_quant_op.inputs[1]))
sess.close()
sim.session.close()
del sim
    @pytest.mark.cuda
    def test_compute_encodings_quant_scheme_update(self):
        """
        Create QuantSim model and update quantScheme using property interface
        """
        tf.compat.v1.reset_default_graph()
        # fix seeds so the two compute_encodings() invocations below see identical data
        np.random.seed(0)
        tf.compat.v1.set_random_seed(0)
        with tf.device('/gpu:0'):
            model = tf.keras.Sequential()
            model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
            model.add(tf.keras.layers.MaxPooling2D((2, 2)))
            model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
            model.summary()
        sess = tf.compat.v1.Session()
        initialize_uninitialized_vars(sess)
        sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=True)
        # Check that op-mode is set correctly for the weight quantizer
        conv2d_weight_quant_op = sim.session.graph.get_operation_by_name('conv2d/Conv2D/ReadVariableOp_quantized')
        self.assertEqual(int(libpymo.TensorQuantizerOpMode.oneShotQuantizeDequantize),
                         sim.session.run(conv2d_weight_quant_op.inputs[1]))
        def dummy_forward_pass(sess, args):
            # re-seed inside the forward pass so both invocations feed identical batches
            np.random.seed(0)
            tf.compat.v1.set_random_seed(0)
            model_output = sess.graph.get_tensor_by_name('conv2d_1/Relu_quantized:0')
            model_input = sess.graph.get_tensor_by_name('conv2d_input:0')
            dummy_input = np.random.randn(20, 28, 28, 3)
            sess.run(model_output, feed_dict={model_input: dummy_input})
        sim.compute_encodings(dummy_forward_pass, None)
        # capture the encodings computed under the default TF-enhanced scheme
        p_quantizer = sim.quantizer_config('conv2d/Conv2D/ReadVariableOp_quantized')
        old_p_encoding_min = p_quantizer.get_variable_from_op(QuantizeOpIndices.encoding_min)
        old_p_encoding_max = p_quantizer.get_variable_from_op(QuantizeOpIndices.encoding_max)
        # switch the quant scheme through the property interface
        self.assertEqual(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, p_quantizer.quant_scheme)
        p_quantizer.quant_scheme = QuantScheme.post_training_tf
        self.assertEqual(libpymo.QuantizationMode.QUANTIZATION_TF, p_quantizer.quant_scheme)
        # invoke compute encoding after quantScheme update
        sim.compute_encodings(dummy_forward_pass, None)
        new_p_encoding_min = p_quantizer.get_variable_from_op(QuantizeOpIndices.encoding_min)
        new_p_encoding_max = p_quantizer.get_variable_from_op(QuantizeOpIndices.encoding_max)
        # validate: a different scheme over the same (seeded) data must yield different encodings
        self.assertNotEqual(old_p_encoding_min, new_p_encoding_min)
        self.assertNotEqual(old_p_encoding_max, new_p_encoding_max)
        sess.close()
        sim.session.close()
        del sim
def test_export_cpu_model(self):
"""
Create QuantSim for a CPU model, compute encodings and export out a resulting model
"""
tf.compat.v1.reset_default_graph()
with tf.device('/cpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, [model.input.op.name], [model.output.op.name], use_cuda=False)
def dummy_forward_pass(sess, args):
model_output = sess.graph.get_tensor_by_name(model.output.name)
model_output = model_output.consumers()[0].outputs[0]
model_input = sess.graph.get_tensor_by_name(model.input.name)
dummy_input = np.random.randn(20, 28, 28, 3)
sess.run(model_output, feed_dict={model_input: dummy_input})
sim.compute_encodings(dummy_forward_pass, None)
# Make some changes to model parameters to see if they are part of the exported model
with sim.session.graph.as_default():
first_bias_tensor = sim.session.graph.get_tensor_by_name('conv2d/BiasAdd/ReadVariableOp:0')
first_bias_tensor_val = sim.session.run(first_bias_tensor)
self.assertTrue(np.any(first_bias_tensor_val == 0))
first_bias_tensor_var = [var for var in tf.compat.v1.global_variables() if var.name == 'conv2d/bias:0'][0]
first_bias_tensor_var.load(np.ones(32), sim.session)
all_op_types = [op.type for op in sim.session.graph.get_operations()]
self.assertIn('QcQuantize', all_op_types)
sim.export('/tmp', 'quant_sim_model')
with open('/tmp/quant_sim_model.encodings') as json_file:
encoding_data = json.load(json_file)
activation_keys = list(encoding_data["activation_encodings"].keys())
self.assertTrue(activation_keys[0] == "conv2d/Relu:0")
self.assertTrue(isinstance(encoding_data["activation_encodings"]["conv2d/Relu:0"], list))
act_encoding_keys = encoding_data["activation_encodings"]["conv2d/Relu:0"][0].keys()
self.assertTrue("bitwidth" in act_encoding_keys)
self.assertTrue("is_symmetric" in act_encoding_keys)
self.assertTrue("max" in act_encoding_keys)
self.assertTrue("min" in act_encoding_keys)
self.assertTrue("offset" in act_encoding_keys)
self.assertTrue("scale" in act_encoding_keys)
param_keys = list(encoding_data["param_encodings"].keys())
self.assertTrue(param_keys[0] == "conv2d/Conv2D/ReadVariableOp:0")
self.assertTrue(isinstance(encoding_data["param_encodings"]["conv2d/Conv2D/ReadVariableOp:0"], list))
param_encoding_keys = encoding_data["param_encodings"]["conv2d/Conv2D/ReadVariableOp:0"][0].keys()
self.assertTrue("bitwidth" in param_encoding_keys)
self.assertTrue("is_symmetric" in param_encoding_keys)
self.assertTrue("max" in param_encoding_keys)
self.assertTrue("min" in param_encoding_keys)
self.assertTrue("offset" in param_encoding_keys)
self.assertTrue("scale" in param_encoding_keys)
new_sess = load_model_from_meta('/tmp/quant_sim_model.meta')
first_bias_tensor = new_sess.graph.get_tensor_by_name('conv2d/BiasAdd/ReadVariableOp:0')
first_bias_tensor_val = new_sess.run(first_bias_tensor)
self.assertTrue(np.any(first_bias_tensor_val == 1))
all_op_types = [op.type for op in new_sess.graph.get_operations()]
self.assertNotIn('QcQuantize', all_op_types)
sess.close()
sim.session.close()
del sim
def test_save_load_ckpt_cpu_model(self):
"""
Create QuantSim for a CPU model, test save and load on a quantsim model.
"""
tf.compat.v1.reset_default_graph()
with tf.device('/cpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, [model.input.op.name], [model.output.op.name], use_cuda=False)
# save quantsim model
save_checkpoint(sim, './test_3', 'orig_quantsim_model')
new_quantsim = load_checkpoint('./test_3', 'orig_quantsim_model')
# validations
assert(sim is not new_quantsim)
self.assertTrue(new_quantsim.session is not None)
self.assertTrue(new_quantsim._quant_scheme == sim._quant_scheme)
self.assertTrue(new_quantsim._rounding_mode == sim._rounding_mode)
self.assertTrue(new_quantsim._use_cuda == sim._use_cuda)
self.assertTrue(len(new_quantsim._param_quantizers) == len(sim._param_quantizers))
self.assertTrue(len(new_quantsim._activation_quantizers) == len(sim._activation_quantizers))
for quantize_op in new_quantsim._param_quantizers:
self.assertFalse(sim._param_quantizers[quantize_op].session ==
new_quantsim._param_quantizers[quantize_op].session)
self.assertTrue(sim._param_quantizers[quantize_op].tensor_quantizer.getQuantScheme() ==
new_quantsim._param_quantizers[quantize_op].tensor_quantizer.getQuantScheme())
self.assertTrue(sim._param_quantizers[quantize_op].tensor_quantizer.roundingMode ==
new_quantsim._param_quantizers[quantize_op].tensor_quantizer.roundingMode)
self.assertFalse(sim._param_quantizers[quantize_op].tensor_quantizer.isEncodingValid)
self.assertFalse(new_quantsim._param_quantizers[quantize_op].tensor_quantizer.isEncodingValid)
for quantize_op in new_quantsim._activation_quantizers:
self.assertFalse(sim._activation_quantizers[quantize_op].session ==
new_quantsim._activation_quantizers[quantize_op].session)
self.assertTrue(sim._activation_quantizers[quantize_op].tensor_quantizer.getQuantScheme() ==
new_quantsim._activation_quantizers[quantize_op].tensor_quantizer.getQuantScheme())
self.assertTrue(sim._activation_quantizers[quantize_op].tensor_quantizer.roundingMode ==
new_quantsim._activation_quantizers[quantize_op].tensor_quantizer.roundingMode)
self.assertFalse(sim._activation_quantizers[quantize_op].tensor_quantizer.isEncodingValid)
self.assertFalse(new_quantsim._activation_quantizers[quantize_op].tensor_quantizer.isEncodingValid)
# remove the old quant sim reference and session
# to test that everything is loaded correctly on new quantsim including tensor quantizer references
sim.session.close()
del sim
# delete temp folder created and close sessions
shutil.rmtree('./test_3')
sess.close()
new_quantsim.session.close()
del new_quantsim
def test_save_load_ckpt_after_compute_encoding_on_orig_object(self):
"""
Create QuantSim for a CPU model, test save and load on a quantsim model
when encodings have been computed on original quantsim object
"""
tf.compat.v1.reset_default_graph()
with tf.device('/cpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, [model.input.op.name], [model.output.op.name], use_cuda=False)
def dummy_forward_pass(n_sess, args):
model_output = n_sess.graph.get_tensor_by_name(model.output.name)
model_output = model_output.consumers()[0].outputs[0]
model_input = n_sess.graph.get_tensor_by_name(model.input.name)
dummy_input = np.random.randn(20, 28, 28, 3)
n_sess.run(model_output, feed_dict={model_input: dummy_input})
sim.compute_encodings(dummy_forward_pass, None)
# save quantsim model
save_checkpoint(sim, './test_3', 'orig_quantsim_model')
new_quantsim = load_checkpoint('./test_3', 'orig_quantsim_model')
# validations
assert(sim is not new_quantsim)
# as we have performed computeEncodings() on saved quantsim object, these must be set to True/False
# in loaded quantsim object as on orig model
for quantize_op in new_quantsim._param_quantizers:
self.assertTrue(new_quantsim._param_quantizers[quantize_op].tensor_quantizer.isEncodingValid ==
sim._param_quantizers[quantize_op].tensor_quantizer.isEncodingValid)
self.assertTrue(new_quantsim._param_quantizers[quantize_op].
get_variable_from_op(QuantizeOpIndices.encoding_min) ==
sim._param_quantizers[quantize_op].
get_variable_from_op(QuantizeOpIndices.encoding_min))
self.assertTrue(new_quantsim._param_quantizers[quantize_op].
get_variable_from_op(QuantizeOpIndices.encoding_max) ==
sim._param_quantizers[quantize_op].
get_variable_from_op(QuantizeOpIndices.encoding_max))
for quantize_op in new_quantsim._activation_quantizers:
self.assertTrue(new_quantsim._activation_quantizers[quantize_op].tensor_quantizer.isEncodingValid ==
sim._activation_quantizers[quantize_op].tensor_quantizer.isEncodingValid)
self.assertTrue(new_quantsim._activation_quantizers[quantize_op].
get_variable_from_op(QuantizeOpIndices.encoding_min) ==
sim._activation_quantizers[quantize_op].
get_variable_from_op(QuantizeOpIndices.encoding_min))
self.assertTrue(new_quantsim._activation_quantizers[quantize_op].
get_variable_from_op(QuantizeOpIndices.encoding_max) ==
sim._activation_quantizers[quantize_op].
get_variable_from_op(QuantizeOpIndices.encoding_max))
# delete temp folder created and close sessions
shutil.rmtree('./test_3')
sess.close()
sim.session.close()
new_quantsim.session.close()
del sim
del new_quantsim
def test_set_get_quantizer_params_using_properties(self):
"""
Create QuantSim for a CPU model, test param read and write using properties
"""
tf.compat.v1.reset_default_graph()
with tf.device('/cpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, [model.input.op.name], [model.output.op.name], use_cuda=False)
p_quantizer = sim.quantizer_config('conv2d/Conv2D/ReadVariableOp_quantized')
o_quantizer = sim.quantizer_config('conv2d/Relu_quantized')
bias_quantizer = sim.quantizer_config('conv2d/BiasAdd/ReadVariableOp_quantized')
# check if __str__ can print the object info
print(p_quantizer)
bitwidth = p_quantizer.bitwidth
self.assertEqual(8, bitwidth)
p_quantizer.bitwidth = 6
bitwidth = p_quantizer.bitwidth
self.assertEqual(6, bitwidth)
bitwidth = o_quantizer.bitwidth
self.assertEqual(8, bitwidth)
o_quantizer.bitwidth = 6
bitwidth = o_quantizer.bitwidth
self.assertEqual(6, bitwidth)
sym_encoding = bias_quantizer.use_symmetric_encoding
self.assertFalse(sym_encoding)
bias_quantizer.use_symmetric_encoding = True
sym_encoding = bias_quantizer.use_symmetric_encoding
self.assertTrue(sym_encoding)
rounding_mode = o_quantizer.rounding_mode
self.assertEqual(libpymo.RoundingMode.ROUND_NEAREST, rounding_mode)
o_quantizer.rounding_mode = libpymo.RoundingMode.ROUND_STOCHASTIC
rounding_mode = o_quantizer.rounding_mode
self.assertEqual(libpymo.RoundingMode.ROUND_STOCHASTIC, rounding_mode)
quant_scheme = o_quantizer.quant_scheme
self.assertEqual(libpymo.QuantizationMode.QUANTIZATION_TF_ENHANCED, quant_scheme)
o_quantizer.quant_scheme = QuantScheme.post_training_tf
quant_scheme = o_quantizer.quant_scheme
self.assertEqual(libpymo.QuantizationMode.QUANTIZATION_TF, quant_scheme)
self.assertFalse(o_quantizer.tensor_quantizer.isEncodingValid)
is_enabled = p_quantizer.enabled
self.assertTrue(is_enabled)
p_quantizer.enabled = False
is_enabled = p_quantizer.enabled
self.assertFalse(is_enabled)
sim.session.close()
del sim
def test_manual_quantize(self):
""" Test quantizing a model by manually specifying ops to quantize """
def get_manual_activations(_graph, _starting_ops, _ending_ops):
"""
Overriding function for getting a list of ops to insert activation quantizers for
:param _graph: Unused argument
:param _starting_ops: Unused argument
:param _ending_ops: Unused argument
:return: List of ops to insert activation quantizers for, None for placeholder
"""
return ['conv2d/Relu'], None
def get_manual_params(_graph, _starting_ops, _ending_ops):
"""
Overriding function for getting a list of ops to insert param quantizers for
:param _graph: Unused argument
:param _starting_ops: Unused argument
:param _ending_ops: Unused argument
:return: List of ops to insert param quantizers for, and list of param indices for these ops
"""
return ['conv2d_1/Conv2D'], [1]
def configure_quantization_ops(self, _conn_graph, _ops_with_param_names, _indices, _activation_op_names,
_config_file):
"""
Overriding function for configuring quantization ops inserted by QuantizationSimModel
:param self: Self refers to QuantizationSimModel object
:param _conn_graph: Unused argument
:param _ops_with_param_names: Unused argument
:param _indices: Unused argument
:param _activation_op_names: Unused argument
:param _config_file: Unused argument
"""
conv2d_relu_quant_info = self._activation_quantizers['conv2d/Relu_quantized']
conv2d_relu_quant_info.enabled = False
conv2d_relu_quant_info.enabled = True
conv2d_1_weight_quant_info = self._param_quantizers['conv2d_1/Conv2D/ReadVariableOp_quantized']
conv2d_1_weight_quant_info.enabled = False
conv2d_1_weight_quant_info.enabled = True
tf.compat.v1.reset_default_graph()
with tf.device('/cpu:0'):
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=3, input_shape=(28, 28, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Conv2D(64, kernel_size=3, activation='relu'))
model.summary()
sess = tf.compat.v1.Session()
initialize_uninitialized_vars(sess)
orig_get_ops_to_quantize_activations_for = QuantizationSimModel._get_ops_to_quantize_activations_for
orig_get_ops_to_quantize_weights_for = QuantizationSimModel._get_ops_to_quantize_params_for
orig_configure_quantization_ops = QuantizationSimModel.configure_quantization_ops
QuantizationSimModel._get_ops_to_quantize_activations_for = get_manual_activations
QuantizationSimModel._get_ops_to_quantize_params_for = get_manual_params
QuantizationSimModel.configure_quantization_ops = configure_quantization_ops
sim = QuantizationSimModel(sess, ['conv2d_input'], ['conv2d_1/Relu'], use_cuda=False)
self.assertEqual(1, len(sim._activation_quantizers))
self.assertEqual(1, len(sim._param_quantizers))
sess.close()
sim.session.close()
QuantizationSimModel._get_ops_to_quantize_activations_for = orig_get_ops_to_quantize_activations_for
QuantizationSimModel._get_ops_to_quantize_params_for = orig_get_ops_to_quantize_weights_for
QuantizationSimModel.configure_quantization_ops = orig_configure_quantization_ops
sim.session.close()
del sim
def test_skip_quantizing_dtype_int(self):
""" Test that op with dtype int32 is skipped during quantization """
tf.compat.v1.reset_default_graph()
with tf.compat.v1.Session() as sess:
_ = model_with_dtype_int()
initialize_uninitialized_vars(sess)
sim = QuantizationSimModel(sess, ['input_1', 'input_2'], ['model_with_dtype_int/Softmax'], use_cuda=False)
self.assertEqual(6, len(sim._activation_quantizers))
self.assertTrue('input_1_quantized' not in sim._activation_quantizers)
self.assertTrue('input_2_quantized' in sim._activation_quantizers)
sim.session.close()
del sim
def test_insert_quant_op_recurrent(self):
""" test insertion of quant ops to recurrent layer with conditional blocks """
tf.compat.v1.reset_default_graph()
sess = tf.compat.v1.Session()
with sess.graph.as_default():
inputs = tf.keras.Input(shape=(3, 100))
# Add an RNN layer with 12 internal units.
# Add an RNN layer
x = tf.keras.layers.SimpleRNN(12)(inputs)
_ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,
name="simplernn_model")(x)
init = tf.compat.v1.global_variables_initializer()
sess.run(init)
ops = sess.graph.get_operations()
quant_op_inside_while_block_name = "simple_rnn/while/MatMul/ReadVariableOp_quantized"
self.assertFalse(quant_op_inside_while_block_name in [op.name for op in ops])
# construct a quantization sim model
sim = QuantizationSimModel(sess, ['input_1'], ['simplernn_model/Softmax'], use_cuda=False)
# get ops and make sure we have a quantized op added to the conditional block
ops = sim.session.graph.get_operations()
self.assertTrue(quant_op_inside_while_block_name in [op.name for op in ops])
sim.session.close()
del sim
    def test_compute_encodings(self):
        """ Test that ops not evaluated during compute encodings are set to passThrough mode. """
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        # NOTE(review): np.ndarray() allocates without initializing, so arbitrary memory
        # contents are fed; presumably only op reachability matters here -- confirm.
        test_inp = np.ndarray((1, 32, 32, 3))
        def dummy_forward_func(sess, _):
            # deliberately run only up to flatten/Reshape, so ops after it are
            # never evaluated during compute_encodings
            input_tensor = sess.graph.get_tensor_by_name('input_1:0')
            output_tensor = sess.graph.get_tensor_by_name('flatten/Reshape:0')
            sess.run(output_tensor, feed_dict={input_tensor: test_inp})
        with sess.as_default():
            _ = keras_model_functional()
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)
            sim = QuantizationSimModel(sess, ['input_1'], ['keras_model_functional/Softmax'])
            sim.compute_encodings(dummy_forward_func, None)
            for name, quant_info in sim._activation_quantizers.items():
                if name in ['keras_model_functional/Softmax_quantized', 'keras_model_functional/BiasAdd_quantized']:
                    # Check that quantizers after op evaluated in compute_encodings are in passThrough (3) mode
                    self.assertEqual(quant_info.get_op_mode(), 3)
                    self.assertFalse(quant_info.tensor_quantizer.isEncodingValid)
                elif name in ['scope_1/conv2d_3/BiasAdd_quantized']:
                    # Check that passThrough quantizers remain as passThrough (3)
                    self.assertEqual(quant_info.get_op_mode(), 3)
                    self.assertFalse(quant_info.tensor_quantizer.isEncodingValid)
                else:
                    # Check that all other quantizers are in quantizeDequantize (2) mode
                    self.assertEqual(quant_info.get_op_mode(), 2)
                    self.assertTrue(quant_info.tensor_quantizer.isEncodingValid)
            # a full forward pass through the quantized graph must still work
            input_tensor = sim.session.graph.get_tensor_by_name('input_1:0')
            output_tensor = sim.session.graph.get_tensor_by_name('keras_model_functional/Softmax:0')
            sim.session.run(output_tensor, feed_dict={input_tensor: test_inp})
            sim.session.close()
            del sim
    def test_matmul_param_selection_lstm(self):
        """ Test apis to select input params to MatMuls within LSTM for quantization """
        tf.compat.v1.reset_default_graph()
        sess = tf.compat.v1.Session()
        with sess.graph.as_default():
            inputs = tf.keras.Input(shape=(3, 100))
            # Add an RNN layer with 12 internal units.
            x = tf.keras.layers.LSTM(12, name='lstm0')(inputs)
            _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,
                                      name="matmul0")(x)
            init = tf.compat.v1.global_variables_initializer()
            sess.run(init)
            # _ = tf.compat.v1.summary.FileWriter('./lstm', sess.graph)
            # Case 1: MatMul whose kernel arrives through a Split op -- the op to
            # modify should be the Split, with the param read through it
            matmul_with_split_inside_lstm = "lstm0/while/MatMul"
            tf_split_op_in = sess.graph.get_operation_by_name("lstm0/while/split")
            tf_matmul_with_split_inside_lstm = sess.graph.get_operation_by_name(matmul_with_split_inside_lstm)
            param_in_through_split = sess.graph.get_tensor_by_name("lstm0/while/split/ReadVariableOp:0")
            can_modify_op, param_in = QuantizationSimModel._get_op_to_modify_with_param_in(
                tf_matmul_with_split_inside_lstm, 1)
            self.assertEqual(can_modify_op, tf_split_op_in)
            self.assertEqual(param_in, param_in_through_split)
            # Case 2: MatMul whose kernel arrives through a StridedSlice op -- the op to
            # modify should be the StridedSlice, with the param read upstream of it
            matmul_with_slice_inside_lstm = "lstm0/while/MatMul_5"
            tf_strided_slice_op_in = sess.graph.get_operation_by_name("lstm0/while/strided_slice_1")
            tf_matmul_with_slice_inside_lstm = sess.graph.get_operation_by_name(matmul_with_slice_inside_lstm)
            param_in_through_strided_slice = sess.graph.get_tensor_by_name("lstm0/while/ReadVariableOp_1:0")
            can_modify_op, param_in = QuantizationSimModel._get_op_to_modify_with_param_in(
                tf_matmul_with_slice_inside_lstm, 1)
            self.assertEqual(can_modify_op, tf_strided_slice_op_in)
            self.assertEqual(param_in, param_in_through_strided_slice)
        sess.close()
def validate_simple_rnn_auto_insertion_and_forward_pass(self, sess):
"""
common api to validate auto quant node insertion and forward pass for simple rnn layer
:param sess: TensorFlow session
:return:
"""
np.random.seed(0)
tf.set_random_seed(0)
ops = sess.graph.get_operations()
matmul_param_quant_op_inside_while_block_name = "simple_rnn/while/MatMul/ReadVariableOp_quantized"
self.assertFalse(matmul_param_quant_op_inside_while_block_name in [op.name for op in ops])
# _ = tf.summary.FileWriter('./test_simple_rnn_keras', sess.graph)
# construct a quantization sim model
sim = QuantizationSimModel(sess, ['input_1'], ['simplernn_model/Softmax'], use_cuda=False)
# params that must have quantizers
matmul_2_param_quant_op_inside_while_block_name = "simple_rnn/while/MatMul_1/ReadVariableOp_quantized"
# check biasadd param quantizers are disabled
param_quantizers = sim._param_quantizers
for p_quantizer in param_quantizers.keys():
if 'BiasAdd' in p_quantizer:
p_quant_config = sim.quantizer_config(p_quantizer)
self.assertFalse(p_quant_config.enabled)
# activations with quantizers
activation_bias_add_op_inside_while_block_name = "simple_rnn/while/BiasAdd_quantized"
add_op_inside_while_block_name = "simple_rnn/while/add_quantized"
# these should not have activation quantizers
activation_matmul_op_inside_while_block_name = "simple_rnn/while/MatMul_quantized"
activation_matmul_2_op_inside_while_block_name = "simple_rnn/while/MatMul_1_quantized"
# get ops and make sure we have a quantized op added to the conditional block
quantized_graph_op_names = self._get_quant_ops_from_tf_graph(sim.session.graph)
# while block ops
# bias and kernel quantizers
self.assertTrue(matmul_param_quant_op_inside_while_block_name in quantized_graph_op_names)
self.assertTrue(matmul_2_param_quant_op_inside_while_block_name in quantized_graph_op_names)
# output quantizers
self.assertFalse(activation_bias_add_op_inside_while_block_name in quantized_graph_op_names)
self.assertFalse(add_op_inside_while_block_name in quantized_graph_op_names)
self.assertFalse(activation_matmul_op_inside_while_block_name in quantized_graph_op_names)
self.assertFalse(activation_matmul_2_op_inside_while_block_name in quantized_graph_op_names)
# check for input quantizers
input_matmul_op_inside_while_block_name = "simple_rnn/while/TensorArrayReadV3_quantized"
input_matmul_2_op_inside_while_block_name = "simple_rnn/while/Identity_2_quantized"
self.assertTrue(input_matmul_op_inside_while_block_name in quantized_graph_op_names)
self.assertTrue(input_matmul_2_op_inside_while_block_name in quantized_graph_op_names)
# validate encodings
def dummy_forward_pass(sess, args):
model_output = sess.graph.get_tensor_by_name('simplernn_model/Softmax:0')
model_input = sess.graph.get_tensor_by_name('input_1:0')
dummy_input = np.random.randn(16, 3, 100)
sess.run(model_output, feed_dict={model_input: dummy_input})
def eval(sess, input_tensor):
model_output = sess.graph.get_tensor_by_name('simplernn_model/Softmax:0')
model_input = sess.graph.get_tensor_by_name('input_1:0')
out = sess.run(model_output, feed_dict={model_input: input_tensor})
return out
sim.compute_encodings(dummy_forward_pass, None)
random_tensor = np.random.randn(16, 3, 100)
orig_out = eval(sess, random_tensor)
sim.compute_encodings(dummy_forward_pass, None)
# check encoding min and max got updated
with sim.session.graph.as_default():
quantized_out = eval(sim.session, random_tensor)
# check quantized output with orig output
self.assertFalse(np.allclose(orig_out, quantized_out))
# close tf sessions
sess.close()
sim.session.close()
del sim
def test_insert_quant_op_forward_pass_simple_rnn(self):
""" test insertion of quant ops to recurrent layer with conditional blocks """
tf.reset_default_graph()
np.random.seed(0)
tf.set_random_seed(0)
sess = tf.Session()
with sess.graph.as_default():
inputs = tf.keras.Input(shape=(3, 100))
# Add an RNN layer
x = tf.keras.layers.SimpleRNN(12,
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal')(inputs)
_ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,
name="simplernn_model")(x)
init = tf.global_variables_initializer()
sess.run(init)
self.validate_simple_rnn_auto_insertion_and_forward_pass(sess)
sess.close()
def test_insert_quant_op_forward_pass_simple_rnn_with_relu(self):
""" test insertion of quant ops to simple rnn with relu """
tf.reset_default_graph()
np.random.seed(0)
tf.set_random_seed(0)
sess = tf.Session()
with sess.graph.as_default():
inputs = tf.keras.Input(shape=(3, 100))
# Add an RNN layer
x = tf.keras.layers.SimpleRNN(12, activation='relu',
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal')(inputs)
_ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,
name="simplernn_model")(x)
init = tf.global_variables_initializer()
sess.run(init)
self.validate_simple_rnn_auto_insertion_and_forward_pass(sess)
sess.close()
def test_insert_quant_op_forward_pass_simple_rnn_multiple_layers(self):
""" test insertion of quant ops to simple rnn with multiple layes """
tf.reset_default_graph()
np.random.seed(0)
tf.set_random_seed(0)
sess = tf.Session()
with sess.graph.as_default():
inputs = tf.keras.Input(shape=(3, 100))
# Add an RNN layer
x = tf.keras.layers.SimpleRNN(12, activation='tanh',
kernel_initializer='glorot_uniform',
recurrent_initializer='orthogonal',
return_sequences=True)(inputs)
x = tf.keras.layers.SimpleRNN(12, name='rnn1', activation='relu', return_sequences=True)(x)
x = tf.keras.layers.SimpleRNN(12, name='rnn2', activation='tanh')(x)
_ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,
name="simplernn_model")(x)
init = tf.global_variables_initializer()
sess.run(init)
self.validate_simple_rnn_auto_insertion_and_forward_pass(sess)
# note - we will need to disable quantizers on identity nodes in this case
sess.close()
def test_backward_pass_time_taken_simple_rnn(self, is_quantized=True, iterations=10, time_steps=1):
    """ perform backward pass with quantized simple RNN block

    :param is_quantized: when True, wrap the model in a QuantizationSimModel
        and train through the quantized session
    :param iterations: number of training steps to time
    :param time_steps: unused here — the input is built with 1 time step
        (NOTE(review): parameter looks vestigial; confirm before removing)
    :return: average wall-clock seconds per training step
    """
    tf.reset_default_graph()
    sess = tf.Session()
    np.random.seed(0)
    tf.set_random_seed(0)
    batches = 16
    with sess.graph.as_default():
        inputs = tf.keras.Input(shape=(1, 100))
        # Add an RNN layer with 12 internal units.
        x = tf.keras.layers.SimpleRNN(12, kernel_initializer='glorot_uniform',
                                      recurrent_initializer='orthogonal')(inputs)
        _ = tf.keras.layers.Dense(10, activation=tf.nn.softmax,
                                  name="simplernn_model")(x)
        init = tf.global_variables_initializer()
        sess.run(init)
        curr_sess = sess
        if is_quantized:
            # insert quantize ops and compute encodings before timing training
            sim = QuantizationSimModel(sess, ['input_1'], ['simplernn_model/Softmax'], use_cuda=False)

            def dummy_forward_pass(sess, args):
                model_output = sess.graph.get_tensor_by_name('simplernn_model/Softmax:0')
                model_input = sess.graph.get_tensor_by_name('input_1:0')
                dummy_input = np.random.randn(batches, 1, 100)
                sess.run(model_output, feed_dict={model_input: dummy_input})

            sim.compute_encodings(dummy_forward_pass, None)
            curr_sess = sim.session
        inp_tensor = curr_sess.graph.get_tensor_by_name('input_1:0')
        np.random.seed(0)
        w_shape = inp_tensor.shape
        inp_data = np.random.rand(batches, w_shape[1], w_shape[2])
        # train against the pre-softmax logits with a softmax cross-entropy loss
        logits = curr_sess.graph.get_tensor_by_name('simplernn_model/MatMul:0')
        labels = np.random.randint(10, size=batches)
        one_hot_labels = np.eye(10)[labels]
        with curr_sess.graph.as_default():
            var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
            labels_placeholder = tf.placeholder(tf.float32, [None, 10], name='labels')
            loss = tf.losses.softmax_cross_entropy(onehot_labels=labels_placeholder, logits=logits)
            update_ops = []
            global_step = tf.train.create_global_step()
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-3)
            gradients = optimizer.compute_gradients(loss, var_list)
            # re-initialize after adding optimizer slot variables
            init_global = tf.global_variables_initializer()
            init_local = tf.local_variables_initializer()
            init = tf.group(init_global, init_local)
            curr_sess.run(init)
            grad_updates = optimizer.apply_gradients(gradients, global_step=global_step)
            update_ops.append(grad_updates)
            update_op = tf.group(*update_ops)
            with tf.control_dependencies([update_op]):
                train_op = tf.identity(loss, name='train_op')
            # start training
            time_taken_by_default_grad = 0
            for i in range(iterations):
                start_time = time.perf_counter()
                _ = curr_sess.run(train_op, feed_dict={inp_tensor: inp_data, labels_placeholder: one_hot_labels})
                exec_time = time.perf_counter() - start_time
                time_taken_by_default_grad = time_taken_by_default_grad + exec_time
            default_grad_avg_time = time_taken_by_default_grad / iterations
    # close session
    sess.close()
    if is_quantized:
        sim.session.close()
        del sim
    return default_grad_avg_time
# Keeping this disabled, this is for study purpose
def _test_compare_simple_rnn_training_processing_time_increase(self):
    """
    Test to compare time taken by simple rnn node quantized versus no quantization
    There is no validation criterion for this test. It is only for study.
    :return:
    """
    # compare with and without quantize nodes
    num_iterations = 1
    no_quant_simple_rnn_train_avg_time = self.test_backward_pass_time_taken_simple_rnn(
        is_quantized=False, iterations=num_iterations)
    quantized_simple_rnn_train_avg_time = self.test_backward_pass_time_taken_simple_rnn(
        is_quantized=True, iterations=num_iterations)
    print('\nquantized_simple_rnn_train_avg_time = ', quantized_simple_rnn_train_avg_time)
    print('\nno_quant_simple_rnn_train_avg_time = ', no_quant_simple_rnn_train_avg_time)
    slowdown_ratio = ((quantized_simple_rnn_train_avg_time - no_quant_simple_rnn_train_avg_time)
                      / no_quant_simple_rnn_train_avg_time)
    print(' There is a ', slowdown_ratio,
          'x increase in processing time with quant nodes in rnn block')
def validate_internal_lstm_quantisim_nodes(self, quantized_graph_op_names, block_name='lstm',
                                           is_stacked=False, is_time_major=False):
    """
    Given a list of quantized_graph_op_names, this is utility function to validate
    the quantisim nodes are properly inserted.

    Refactored from ~60 copy-pasted variables/asserts (which also misspelled
    "kernel" as "kenel") into data-driven loops with per-op failure messages.

    :param quantized_graph_op_names: names of quantize ops found in the graph
    :param block_name: scope name of the recurrent layer under test
    :param is_stacked: True when validating a stacked (multi-layer) recurrent model
    :param is_time_major: True when the layer was built with time_major=True
    :return:
    """
    prefix = block_name + "/while/"

    # params that must have quantizers: bias, kernel and the four copies of the
    # recurrent kernel read inside the while block
    expected_param_quant_ops = [
        prefix + "split_1/ReadVariableOp_quantized",   # bias
        prefix + "split/ReadVariableOp_quantized",     # kernel
        prefix + "ReadVariableOp_quantized",           # recurrent kernel copy 1
        prefix + "ReadVariableOp_1_quantized",         # recurrent kernel copy 2
        prefix + "ReadVariableOp_2_quantized",         # recurrent kernel copy 3
        prefix + "ReadVariableOp_3_quantized",         # recurrent kernel copy 4
    ]
    for op_name in expected_param_quant_ops:
        self.assertTrue(op_name in quantized_graph_op_names,
                        "missing param quantizer: {}".format(op_name))

    # output quantizers: no activation quantizer is added for eAI — none of the
    # internal matmul / bias-add / add / mul ops may be quantized
    unexpected_activation_quant_ops = (
        [prefix + "MatMul_quantized"] +
        [prefix + "MatMul_{}_quantized".format(i) for i in range(1, 8)] +
        [prefix + "BiasAdd_quantized"] +
        [prefix + "BiasAdd_{}_quantized".format(i) for i in range(1, 4)] +
        [prefix + "add_quantized"] +
        [prefix + "add_{}_quantized".format(i) for i in (2, 4, 5, 6)] +
        [prefix + "Add_{}_quantized".format(i) for i in (1, 3, 7)] +
        [prefix + "Mul_quantized", prefix + "Mul_1_quantized", prefix + "Mul_4_quantized"] +
        [prefix + "mul_{}_quantized".format(i) for i in (2, 3, 5)]
    )
    for op_name in unexpected_activation_quant_ops:
        self.assertFalse(op_name in quantized_graph_op_names,
                         "unexpected activation quantizer: {}".format(op_name))

    # check for input quantizers (x input read and hidden state h identity)
    input_x_op_inside_while_block_name = prefix + "TensorArrayReadV3_quantized"
    input_h_op_inside_while_block_name = prefix + "Identity_2_quantized"
    self.assertTrue(input_x_op_inside_while_block_name in quantized_graph_op_names)
    self.assertTrue(input_h_op_inside_while_block_name in quantized_graph_op_names)

    # check for input quantizer in stacked mode
    if is_stacked:
        if is_time_major:
            input_h_op_pass_to_last_lstm_name = block_name + "/TensorArrayStack/TensorArrayGatherV3_quantized"
        else:
            input_h_op_pass_to_last_lstm_name = block_name + "/transpose_1_quantized"
        self.assertTrue(input_h_op_pass_to_last_lstm_name in quantized_graph_op_names)
def validate_general_lstm_forward_pass_and_encoding(self, sess, sim,
                                                    num_activation_quantizer=6, num_param_quantizer=8):
    """
    Validate quantizer counts, computed encodings and the quantized forward
    pass of an LSTM quantsim model.

    Fix: the inner evaluation helper was named ``eval``, shadowing the builtin;
    renamed to ``evaluate`` (local name only, no interface change).

    :param sess: original (unquantized) tf session
    :param sim: QuantizationSimModel built from sess
    :param num_activation_quantizer: expected number of activation quantizers
    :param num_param_quantizer: expected number of parameter quantizers
    """
    def dummy_forward_pass(sess, args):
        model_output = sess.graph.get_tensor_by_name('lstm_model/Softmax:0')
        model_input = sess.graph.get_tensor_by_name('input_1:0')
        dummy_input = np.random.randn(16, 3, 100)
        sess.run(model_output, feed_dict={model_input: dummy_input})

    def evaluate(sess, input_tensor):
        model_output = sess.graph.get_tensor_by_name('lstm_model/Softmax:0')
        model_input = sess.graph.get_tensor_by_name('input_1:0')
        out = sess.run(model_output, feed_dict={model_input: input_tensor})
        return out

    sim.compute_encodings(dummy_forward_pass, None)
    random_tensor = np.random.randn(16, 3, 100)
    orig_out = evaluate(sess, random_tensor)
    activation_quantizers = sim._activation_quantizers
    param_quantizers = sim._param_quantizers
    # check the number of quantizers
    self.assertEqual(len(activation_quantizers), num_activation_quantizer)
    # kernel, recurrent kernel x4, bias
    # one bias and kernel of dense layer MatMul
    self.assertEqual(len(param_quantizers), num_param_quantizer)
    # Check if encodings have been calculated
    for name, quantizer in activation_quantizers.items():
        if quantizer.enabled:
            self.assertTrue(quantizer.tensor_quantizer.isEncodingValid,
                            "enabled quantizer: {} does not have a valid encoding set ".format(name))
    # check encoding min and max got updated
    with sim.session.graph.as_default():
        quantized_out = evaluate(sim.session, random_tensor)
    # quantized model output is different from orig model
    self.assertFalse(np.allclose(orig_out, quantized_out))
def test_quantize_lstm_default_quantsim_and_forward_pass(self):
    """ Test connected graph construction on a model with lstm op """
    # Build a single default-config LSTM + softmax classifier with fixed seeds.
    tf.reset_default_graph()
    sess = tf.Session()
    np.random.seed(0)
    tf.set_random_seed(0)
    with sess.graph.as_default():
        inputs = tf.keras.Input(shape=(3, 100))
        # Add a LSTM layer with 12 internal units.
        x = tf.keras.layers.LSTM(12)(inputs)
        _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,
                                  name="lstm_model")(x)
        init = tf.global_variables_initializer()
        sess.run(init)
        # _ = tf.summary.FileWriter('./lstm', sess.graph)
    sim = QuantizationSimModel(sess, ['input_1'], ['lstm_model/Softmax'],
                               use_cuda=False)
    # validate quantsim
    # get ops and make sure we have a quantized op added to the conditional block
    quantized_graph_op_names = self._get_quant_ops_from_tf_graph(sim.session.graph)
    self.validate_internal_lstm_quantisim_nodes(quantized_graph_op_names)
    # validate forward pass
    self.validate_general_lstm_forward_pass_and_encoding(sess, sim)
    # close tf sessions
    sess.close()
    sim.session.close()
    del sim
#
def test_quantize_simple_rnn_export(self):
    """ Test model export for recurrent models """
    # Two stacked SimpleRNN layers; export must strip all quantize ops from
    # the saved graph and write the computed encodings to a json file.
    tf.reset_default_graph()
    sess = tf.Session()
    np.random.seed(0)
    tf.set_random_seed(0)
    with sess.graph.as_default():
        inputs = tf.keras.Input(shape=(3, 100))
        # Add an RNN layer with 12 internal units.
        x = tf.keras.layers.SimpleRNN(10, name='rnn1', return_sequences=True)(inputs)
        x = tf.keras.layers.SimpleRNN(10, name='rnn2')(x)
        _ = tf.keras.layers.Dense(10, activation=tf.nn.softmax,
                                  name="fc")(x)
        init = tf.global_variables_initializer()
        sess.run(init)
    sim = QuantizationSimModel(sess, ['input_1'], ['fc/Softmax'],
                               use_cuda=False)

    def dummy_forward_pass(sess, args):
        model_output = sess.graph.get_tensor_by_name('fc/Softmax:0')
        model_input = sess.graph.get_tensor_by_name('input_1:0')
        dummy_input = np.random.randn(1, 3, 100)
        sess.run(model_output, feed_dict={model_input: dummy_input})

    sim.compute_encodings(dummy_forward_pass, None)
    sim.export('./data', 'rnn_quantsim')
    new_sess = load_model_from_meta('./data/rnn_quantsim.meta')
    # the exported graph must still run a forward pass on its own
    dummy_forward_pass(new_sess, None)
    # no quantize ops may remain in the exported graph
    all_op_types = [op.type for op in new_sess.graph.get_operations()]
    self.assertNotIn('QcQuantize', all_op_types)
    self.assertNotIn('QcQuantizeRecurrentParam', all_op_types)
    # Load the encodings file to check if the encodings were exported correctly
    with open("./data/rnn_quantsim.encodings", "r") as encodings_file:
        encodings = json.load(encodings_file)
        self.assertEqual(8, len(encodings['activation_encodings']))
        self.assertEqual(5, len(encodings['param_encodings']))
    # close tf sessions
    sess.close()
    sim.session.close()
    del sim
def _get_quant_ops_from_tf_graph(self, gr: tf.Graph):
    """
    utility to get quant op names in given graph
    :param gr: tf.Graph to scan for quantization ops
    :return: list of names of QcQuantize / QcQuantizeRecurrentParam ops
    """
    ops = gr.get_operations()
    quantized_graph_op_names = [op.name for op in ops if op.type in ["QcQuantize", "QcQuantizeRecurrentParam"]]
    return quantized_graph_op_names
def test_quantize_simple_rnn_save_and_load_checkpoint(self):
    """ Test save/load checkpoint round-trip for recurrent models.

    Fixes: the original session and the restored sim's session were never
    closed (resource leak); the inner ``eval`` helper shadowed the builtin.
    """
    tf.reset_default_graph()
    sess = tf.Session()
    np.random.seed(0)
    tf.set_random_seed(0)
    with sess.graph.as_default():
        inputs = tf.keras.Input(shape=(3, 100))
        # Add an RNN layer with 12 internal units.
        x = tf.keras.layers.SimpleRNN(10, name='rnn1', return_sequences=True)(inputs)
        x = tf.keras.layers.SimpleRNN(10, name='rnn2')(x)
        _ = tf.keras.layers.Dense(10, activation=tf.nn.softmax,
                                  name="fc")(x)
        init = tf.global_variables_initializer()
        sess.run(init)
    sim = QuantizationSimModel(sess, ['input_1'], ['fc/Softmax'],
                               use_cuda=False)

    def evaluate(sess, input_tensor):
        model_output = sess.graph.get_tensor_by_name('fc/Softmax:0')
        model_input = sess.graph.get_tensor_by_name('input_1:0')
        out = sess.run(model_output, feed_dict={model_input: input_tensor})
        return out

    def dummy_forward_pass(sess, args):
        dummy_input = np.random.randn(1, 3, 100)
        evaluate(sess, dummy_input)

    sim.compute_encodings(dummy_forward_pass, None)
    random_tensor = np.random.randn(1, 3, 100)
    old_out = evaluate(sim.session, random_tensor)
    save_checkpoint(sim, './data/', 'simple_rnn_save')
    new_sim = load_checkpoint('./data', 'simple_rnn_save')
    # Check to make sure that inference through the new sim produces exactly the same output as the old sim
    # This checks that quantization parameters have been restored correctly
    # Also checks that we are able to invoke quantize-dequantize ops in the new session (so pymo objects were
    # restored correctly etc.)
    new_out = evaluate(new_sim.session, random_tensor)
    self.assertTrue(np.allclose(old_out, new_out))
    # close all tf sessions (sess and new_sim.session previously leaked)
    sess.close()
    new_sim.session.close()
    sim.session.close()
    del sim
def test_quantize_lstm_sigmoid_quantsim_and_forward_pass(self):
    """ Test connected graph construction on a model with lstm op """
    # Same as the default-LSTM test but with recurrent_activation='sigmoid',
    # which changes the internal ops generated inside the while block.
    tf.reset_default_graph()
    sess = tf.Session()
    np.random.seed(0)
    tf.set_random_seed(0)
    with sess.graph.as_default():
        inputs = tf.keras.Input(shape=(3, 100))
        # Add a LSTM layer with 12 internal units.
        x = tf.keras.layers.LSTM(12, recurrent_activation='sigmoid')(inputs)
        _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,
                                  name="lstm_model")(x)
        init = tf.global_variables_initializer()
        sess.run(init)
        # _ = tf.summary.FileWriter('./lstm', sess.graph)
    sim = QuantizationSimModel(sess, ['input_1'], ['lstm_model/Softmax'],
                               use_cuda=False)
    # validate quantsim
    # get ops and make sure we have a quantized op added to the conditional block
    quantized_graph_op_names = self._get_quant_ops_from_tf_graph(sim.session.graph)
    self.validate_internal_lstm_quantisim_nodes(quantized_graph_op_names)
    # validate forward pass
    self.validate_general_lstm_forward_pass_and_encoding(sess, sim)
    # close tf sessions
    sess.close()
    sim.session.close()
    del sim
def test_quantize_lstm_time_major_true_quantsim_and_forward_pass(self):
    """ Test connected graph construction on a model with a time-major lstm op.

    Cleanup: removed an unused local closure ``dummy_forward_pass`` (and its
    ``batches`` constant) that was defined but never invoked — validation is
    done entirely by the shared helpers below.
    """
    tf.reset_default_graph()
    sess = tf.Session()
    np.random.seed(0)
    tf.set_random_seed(0)
    with sess.graph.as_default():
        inputs = tf.keras.Input(shape=(3, 100))
        # Add a LSTM layer with 12 internal units.
        x = tf.keras.layers.LSTM(12, time_major=True, name='lstm_tm')(inputs)
        _ = tf.keras.layers.Dense(12, activation=tf.nn.softmax,
                                  name="lstm_model")(x)
        init = tf.global_variables_initializer()
        sess.run(init)
        # _ = tf.summary.FileWriter('./lstm', sess.graph)
    sim = QuantizationSimModel(sess, ['input_1'], ['lstm_model/Softmax'],
                               use_cuda=False)
    # validate quantsim
    # get ops and make sure we have a quantized op added to the conditional blocks
    quantized_graph_op_names = self._get_quant_ops_from_tf_graph(sim.session.graph)
    self.validate_internal_lstm_quantisim_nodes(quantized_graph_op_names, 'lstm_tm')
    # validate forward pass
    self.validate_general_lstm_forward_pass_and_encoding(sess, sim)
    # close tf sessions
    sess.close()
    sim.session.close()
    del sim
def test_quantize_lstm_deepspeech_time_major_true_quantsim_and_forward_pass(self):
""" Test connected graph construction on a model with lstm op """
tf.reset_default_graph()
sess = tf.Session()
| np.random.seed(0) | numpy.random.seed |
import numpy as np
import pickle
import glob
import matplotlib.pyplot as plt
from tqdm.notebook import tqdm
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import r2_score
import ipdb
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Lasso
from sklearn.model_selection import train_test_split
from scipy.special import comb
def crop_to_window(u, n):
    """Return the centered length-n slice of each row of the 2-D array u."""
    total_cols = u.shape[1]
    start = int((total_cols - n) // 2)
    return u[:, start:start + n]
def blocks(repeat_length, num_bits=13):
    """Return all binary patterns of length num_bits containing one
    contiguous run of `repeat_length` ones.

    Bug fix: the zero-length guard previously tested the *function*
    `repeat_locations` against 0 (always False, and a NameError at module
    load order aside, never took the branch); it must test repeat_length,
    matching left_right_blocks.
    """
    if repeat_length == 0:
        return [np.zeros(num_bits)]
    num_blocks = num_bits - repeat_length + 1
    patterns = []
    for i in range(0, num_blocks):
        pattern = np.zeros(num_bits)
        pattern[i:i + repeat_length] = 1
        patterns.append(pattern)
    return patterns
def left_right_blocks(repeat_length, num_bits=13):
    """All patterns with one run of `repeat_length` ones in each half of the word."""
    if repeat_length == 0:
        return [np.zeros(num_bits)]
    half = int(num_bits // 2)
    positions = half - repeat_length + 1
    patterns = []
    for left in range(positions):
        for right in range(positions):
            pattern = np.zeros(num_bits)
            pattern[left:left + repeat_length] = 1
            pattern[half + right:half + right + repeat_length] = 1
            patterns.append(pattern)
    return patterns
def repeat_locations(repeat_length, num_bits=13):
    """Union of left_right_blocks patterns for every run width 0..repeat_length."""
    patterns = []
    for width in range(repeat_length + 1):
        patterns.extend(left_right_blocks(width, num_bits))
    return patterns
def support_augmented_with_reversed(support):
    """Close `support` (list of 0/1 vectors) under string reversal."""
    as_strings = {zo_to_string(s) for s in support}
    # add the reversal of every member that is not already present
    missing_reversals = {s[::-1] for s in as_strings} - as_strings
    as_strings |= missing_reversals
    return [string_to_zo(s) for s in as_strings]
def next_string_with_same_num_ones(v):
    """Gosper's hack: smallest integer > v with the same number of set bits."""
    ripple = (v | (v - 1)) + 1
    ones = ((ripple & -ripple) // (v & -v)) >> 1
    return ripple | (ones - 1)
def all_strings_with_k_ones(bit_length, k):
    """All integers < 2**bit_length with exactly k set bits, in ascending order."""
    count = int(comb(bit_length, k))
    current = 2 ** k - 1  # smallest value with k ones
    values = []
    for idx in range(count):
        values.append(current)
        if idx != count - 1:
            current = next_string_with_same_num_ones(current)
    return values
def all_strings_up_to_k_ones(bit_length, k):
    """All integers with popcount 0, 1, ..., k, grouped by popcount."""
    combined = []
    for ones in range(k + 1):
        combined += all_strings_with_k_ones(bit_length, ones)
    return combined
def all_strings_with_given_ones(bit_length, k_list):
    """Concatenate the popcount groups listed in k_list."""
    combined = []
    for ones in k_list:
        combined += all_strings_with_k_ones(bit_length, ones)
    return combined
def synthetic_band_support(band_width, num_bits=13):
    """Support of a centered band: every pattern on the middle 2*band_width bits,
    rotated into place so the band straddles the string's midpoint."""
    rotate_length = num_bits // 2 + band_width
    support = []
    for value in range(2 ** (2 * band_width)):
        pattern = dec_to_bin(value, num_bits)
        support.append(np.roll(pattern, rotate_length))
    return support
def synthetic_band_support_capped_degree(band_width, degree_cap, num_bits=13):
    """Band support restricted to patterns whose popcount is capped.

    degree_cap may be an int (allow popcounts 0..degree_cap) or a list of
    allowed popcounts.

    Bug fix: the original asserted ``degree_cap >= 0`` unconditionally, which
    raises TypeError when degree_cap is a list — even though the list case is
    explicitly supported below. The comparison is now done only for ints.
    """
    assert band_width >= 0, "width needs to be non-negative"
    rotate_length = num_bits // 2 + band_width
    if isinstance(degree_cap, list):
        all_strings = all_strings_with_given_ones(2 * band_width, degree_cap)
    else:
        assert degree_cap >= 0, "cap needs to be non-negative"
        all_strings = all_strings_up_to_k_ones(2 * band_width, degree_cap)
    support = []
    for s in all_strings:
        binary_loc = dec_to_bin(s, num_bits)
        binary_loc = np.roll(binary_loc, rotate_length)
        support.append(binary_loc)
    return support
def support_to_set(support):
    """Map each 0/1 vector in `support` to its string form, returned as a set."""
    return {zo_to_string(s) for s in support}
def set_to_support(the_set):
    """Inverse of support_to_set: convert strings back to 0/1 vectors."""
    return [string_to_zo(loc) for loc in the_set]
def pm_to_zo(pm):
    """
    Goes from plus-minus to zero-one
    """
    # negative entries (-1) map to 1, positive (+1) map to 0
    return (pm < 0).astype(int)
def zo_to_pm(zo):
    """
    Goes from zero-one to plus-minus: 0 -> +1, 1 -> -1
    """
    return (-1) ** zo
def zo_to_string(u):
    """Render a 0/1 vector as a compact string, e.g. [1, 0, 1] -> '101'."""
    return ''.join(str(bit) for bit in u)
def string_to_zo(u):
    """Parse a '0'/'1' string back into an integer numpy vector."""
    return np.array([int(ch) for ch in u])
def my_string_format(s):
    """Insert a ':' between the two halves of s for readability."""
    half = len(s) // 2
    return s[:half] + ':' + s[half:]
def my_print_string(s):
    """Print s with the half-way ':' separator inserted."""
    print(my_string_format(s))
def random_binary_matrix(m, n, p=0.5):
    """m-by-n matrix of i.i.d. Bernoulli(p) entries in {0, 1}."""
    return np.random.binomial(1, p, size=(m, n))
def dec_to_bin(x, num_bits):
    """Big-endian binary expansion of x as a length-num_bits int array."""
    assert x < 2**num_bits, "number of bits are not enough"
    digits = bin(x)[2:].zfill(num_bits)
    return np.array([int(d) for d in digits])
def bin_to_dec(x):
    """Big-endian 0/1 vector back to an integer (inverse of dec_to_bin)."""
    weights = 2 ** np.arange(len(x))[::-1]
    return weights.dot(x)
def bool2int(x):
    """Little-endian bit sequence to integer: x[i] contributes x[i] * 2**i."""
    total = 0
    for position, bit in enumerate(x):
        total += bit << position
    return total
def get_sampling_index(x, A, p=0):
    """
    x: sampling index
    A: subsampling matrix
    p: delay
    """
    num_bits = A.shape[0]
    bits = dec_to_bin(x, num_bits)
    return (bits.dot(A) + p) % 2
def get_random_binary_string(num_bits, p=0.5):
    """Length-num_bits vector of i.i.d. Bernoulli(p) bits."""
    return np.random.binomial(1, p, size=num_bits)
def random_delay_pair(num_bits, target_bit):
    """
    num_bits: number of bits
    location_target: the targeted location (q in equation 26 in https://arxiv.org/pdf/1508.06336.pdf)
    """
    # unit vector e_q with a single one at the targeted bit
    unit_vector = dec_to_bin(2 ** target_bit, num_bits)
    base = get_random_binary_string(num_bits)
    return base, (base + unit_vector) % 2
def make_delay_pairs(num_pairs, num_bits):
    """The all-zero delay followed by num_pairs random delay pairs per bit."""
    delays = [dec_to_bin(0, num_bits)]
    for bit_index in range(num_bits):
        for _ in range(num_pairs):
            first, second = random_delay_pair(num_bits, bit_index)
            delays.append(first)
            delays.append(second)
    return delays
def myfwht(x):
    """Recursive (unnormalized) fast Walsh-Hadamard transform of x.

    Length of x must be a power of two.
    """
    n = x.shape[0]
    if n == 1:
        return x
    half_lo = myfwht(x[:n // 2])
    half_hi = myfwht(x[n // 2:])
    return np.concatenate([half_lo + half_hi, half_lo - half_hi])
def results_to_measurements(results):
    """Apply the Walsh-Hadamard transform to every row of `results`."""
    measurements = np.zeros_like(results)
    for row in range(results.shape[0]):
        measurements[row] = myfwht(results[row])
    return measurements
def get_delay_index_base(bit_index, delay_index, D):
    """Row offset of a delay pair: 1 skips the zero delay, each bit owns 2*D rows."""
    return 1 + 2 * (D * bit_index + delay_index)
def estimate_location(u, num_bits=13, num_delays_per_bit=3):
location = []
for bit in range(num_bits):
sign_total = 0
for delay in range(num_delays_per_bit):
delay_0 = get_delay_index_base(bit, delay, num_delays_per_bit)
delay_1 = delay_0 + 1
r0 = u[delay_0]
r1 = u[delay_1]
sign_total += np.sign(r0)*np.sign(r1)
location.append(sign_total/num_delays_per_bit)
location = | np.array(location) | numpy.array |
import numpy as np
import math
import data
def angle_rotation(gui):
    """Angle (radians, in (-pi, pi]) of the user-drawn rotation line,
    measured from the +x axis after flipping into a y-up frame."""
    coords = gui.line_rot.LineCoords
    first_position = [coords[0][1], coords[0][0]]
    second_position = [coords[1][1], coords[1][0]]
    print('first position = ', first_position)
    print('second position = ', second_position)
    # swap axes and negate to convert image coordinates to y-up math coordinates
    first_point = [first_position[1], -first_position[0]]
    second_point = [second_position[1], -second_position[0]]
    print('first point = ', first_point)
    print('second point = ', second_point)
    dx = second_point[0] - first_point[0]
    dy = second_point[1] - first_point[1]
    norm = math.sqrt(dx ** 2 + dy ** 2)
    horizontal_proj = dx / norm
    vertical_proj = dy / norm
    # arccos only returns [0, pi]; the vertical sign disambiguates the quadrant
    if vertical_proj >= 0:
        theta = np.arccos(horizontal_proj)
    else:
        theta = -np.arccos(horizontal_proj)
    print('theta = ', theta)
    return theta
def rotate_tensor(datastruct, gui):
theta = angle_rotation(gui)
exx = data.SMGData.load(datastruct, 'Exx')
eyy = data.SMGData.load(datastruct, 'Eyy')
exy = data.SMGData.load(datastruct, 'Exy')
rxy = data.SMGData.load(datastruct, 'Rxy')
epsilon = np.array([[exx, exy], [exy, eyy]])
omega = np.array([[np.zeros(rxy.shape), rxy], [-rxy, | np.zeros(rxy.shape) | numpy.zeros |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 13:54:55 2020
@author: akurnizk
"""
import os
import hydroeval
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime #parse the datetimes we get from NOAA
from matplotlib import pylab
from scipy.optimize import fsolve
from pytides.tide import Tide
import seaborn as sns; sns.set(font_scale=2)
import matplotlib as mpl
mpl.rc('xtick', labelsize=22)
mpl.rc('ytick', labelsize=22)
mpl.rcParams['pdf.fonttype'] = 42
map_dir = r'E:\Maps' # retrieved files from https://viewer.nationalmap.gov/basic/
data_dir = os.path.join('E:\Data')
#%% Load Data
"""
Dike + Herring River All
"""
# All Measured + Discharge Calculated off Measured
HR_all_resam_1hr_df = pd.read_csv(os.path.join(data_dir,"General Dike Data","HR_All_Data_Resampled_HourlyMeans_8272017-1212020.csv"))
data_cols = HR_all_resam_1hr_df.columns.drop("datetime")
HR_all_resam_1hr_df[data_cols] = HR_all_resam_1hr_df[data_cols].apply(pd.to_numeric, errors='coerce')
HR_all_resam_1hr_df["datetime"] = pd.to_datetime(HR_all_resam_1hr_df["datetime"])
# WF Harbor, HR Predicted
pred_to_2100_dtHRocean_df = pd.read_csv(os.path.join(data_dir,"General Dike Data","Dike_Data_HourlyPred_111946_12312100.csv"))
data_cols_max = pred_to_2100_dtHRocean_df.columns.drop("datetime")
pred_to_2100_dtHRocean_df[data_cols_max] = pred_to_2100_dtHRocean_df[data_cols_max].apply(pd.to_numeric, errors='coerce')
pred_to_2100_dtHRocean_df["datetime"] = pd.to_datetime(pred_to_2100_dtHRocean_df["datetime"])
# CNR U/S, High Toss Predicted
pred_to_2100_CNRUS_HT_df = pd.read_csv(os.path.join(data_dir,"General Dike Data","CNRUS_HT_HourlyPred_111946_12312100.csv"))
data_cols_min = pred_to_2100_CNRUS_HT_df.columns.drop("datetime")
pred_to_2100_CNRUS_HT_df[data_cols_min] = pred_to_2100_CNRUS_HT_df[data_cols_min].apply(pd.to_numeric, errors='coerce')
pred_to_2100_CNRUS_HT_df["datetime"] = pd.to_datetime(pred_to_2100_CNRUS_HT_df["datetime"])
# Discharge Calculated off Predicted
Q_dike_df = pd.read_csv(os.path.join(data_dir,"General Dike Data","Dike_Discharge_Calc_HourlyPred_111946_12312100.csv"))
data_cols_min = Q_dike_df.columns.drop("datetime")
Q_dike_df[data_cols_min] = Q_dike_df[data_cols_min].apply(pd.to_numeric, errors='coerce')
Q_dike_df["datetime"] = pd.to_datetime(Q_dike_df["datetime"])
"""
Channel Geometry
"""
out_x_stacked = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_xcoords.csv'), delimiter=',')
out_y_stacked = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_ycoords.csv'), delimiter=',')
elevs_interp = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_elevs.csv'), delimiter=',')
intersect_newxy = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_inscts.csv'), delimiter=',')
min_dist_dx = np.loadtxt(os.path.join(map_dir, 'HR_XsecLines','HR_xsec_all_dx.csv'), delimiter=',')
# make top of array the upstream-most section?
out_x_stacked = np.flip(out_x_stacked,axis=0)
out_y_stacked = np.flip(out_y_stacked,axis=0)
elevs_interp = np.flip(elevs_interp,axis=0)
intersect_newxy = np.flip(intersect_newxy,axis=0)
min_dist_dx = np.flip(min_dist_dx,axis=0)
# High Toss
out_x_HT = out_x_stacked[0]
out_y_HT = out_y_stacked[0]
elev_HT = elevs_interp[0]
intersect_HT = intersect_newxy[0]
# CNR U/S
out_x_CNR = out_x_stacked[-1]
out_y_CNR = out_y_stacked[-1]
elev_CNR = elevs_interp[-1]
intersect_CNR = intersect_newxy[-1]
#%% Plot of Measured, and Dike Q Calcs with Measured
# Overlay all measured levels and discharges on one scatter plot; the first
# call creates the axes and subsequent calls reuse them via ax=ax.
ax = HR_all_resam_1hr_df.plot.scatter(x="datetime", y="Gage height, m, Ocean side", color='LightBlue', label = 'Gage height, m , Ocean side')
HR_all_resam_1hr_df.plot.scatter(x="datetime", y="Gage height, m, HR side", color='LightGreen', label = 'Gage height, m , HR side', ax=ax)
HR_all_resam_1hr_df.plot.scatter(x="datetime", y="Discharge, cms", color='Turquoise', label = 'Discharge, cms', ax=ax)
HR_all_resam_1hr_df.plot.scatter(x="datetime", y="CNR U/S Water Level, NAVD88", color='DarkGreen', label = 'Water Level, m, CNR U/S', ax=ax)
HR_all_resam_1hr_df.plot.scatter(x="datetime", y="Dog Leg Water Level, NAVD88", color='DarkRed', label = 'Water Level, m, Dog Leg', ax=ax)
HR_all_resam_1hr_df.plot.scatter(x="datetime", y="High Toss Water Level, NAVD88", color='DarkOrange', label = 'Water Level, m, High Toss', ax=ax)
HR_all_resam_1hr_df.plot.scatter(x="datetime", y="Discharge, Dike Calc, cms", color='DarkBlue', label = 'Dike Calculated Discharge, cms', ax=ax)
# Show X-axis major tick marks as dates
def DateAxisFmt(yax_label):
    """Format the current axes: auto date ticks on x, size-22 axis labels."""
    locator = mdates.AutoDateLocator()
    axis = plt.gca().xaxis
    axis.set_major_locator(locator)
    axis.set_major_formatter(mdates.AutoDateFormatter(locator))
    plt.gcf().autofmt_xdate()
    plt.xlabel('Date', fontsize=22)
    plt.ylabel(yax_label, fontsize=22)
ylabel_elev_disch = 'Elevation (m), Discharge (m^3/s)'
DateAxisFmt(ylabel_elev_disch)
plt.legend(loc='upper right')
#%% Remove NaNs from necessary data.
# For Measured Discharge
HR_disch_measend = HR_all_resam_1hr_df["Discharge, cms"].last_valid_index()
# NOTE(review): iloc-slicing with the label returned by last_valid_index()
# is only correct while the frame keeps a default RangeIndex, and the stop
# is exclusive, so the last valid sample itself is dropped -- confirm intended.
HR_all_meas_disch_df_slice = HR_all_resam_1hr_df.iloc[0:HR_disch_measend]
dt_CNR_HT_disch_cols = ['datetime','CNR U/S Water Level, NAVD88','High Toss Water Level, NAVD88','Discharge, cms']
HR_CNR_HT_disch_df_slice = HR_all_meas_disch_df_slice.filter(dt_CNR_HT_disch_cols, axis=1)
# NOTE(review): .filter() returns a new frame, so this in-place dropna only
# affects the local slice (and may emit SettingWithCopyWarning), never the
# source frame -- likely why the author saw no effect.
HR_CNR_HT_disch_df_slice.dropna(inplace=True) # Doesn't change anything...
# For Calculated Discharge
HR_disch_calcstrt = HR_all_resam_1hr_df["Discharge, Dike Calc, cms"].first_valid_index()
HR_disch_calcend = HR_all_resam_1hr_df["Discharge, Dike Calc, cms"].last_valid_index()
HR_all_calc_disch_df_slice = HR_all_resam_1hr_df.iloc[HR_disch_calcstrt:HR_disch_calcend]
dt_CNR_HT_calcdisch_cols = ['datetime','CNR U/S Water Level, NAVD88','High Toss Water Level, NAVD88','Discharge, Dike Calc, cms']
HR_CNR_HT_calcdisch_df_slice = HR_all_calc_disch_df_slice.filter(dt_CNR_HT_calcdisch_cols, axis=1)
HR_CNR_HT_calcdisch_df_slice.dropna(inplace=True)
HR_CNR_HT_calcdisch_df_slice.reset_index(drop=True, inplace=True)
# Make sure to re-merge with full time series!
#%% Starting conditions
grav = 9.81 # m/s^2
nsec = len(min_dist_dx) # number of spaces between xsecs
np11 = 2*nsec + 2  # system size: 2 unknowns per section plus boundaries -- TODO confirm against solver
tlast = 89400*1 # time for transient flow computation (measurements are at 5 min (300s) intervals - use these?)
chl = np.nansum(min_dist_dx)  # total channel length (m): NaN-safe sum of inter-section spacings
#!/usr/bin/env python3
import numpy as np
# import matplotlib.pyplot as plt
# from scipy.integrate import quad
# from scipy import interpolate
# import AMC
import mass_function
import NSencounter as NE
import perturbations as PB
import glob
# Fall back to a no-op progress "bar" when tqdm is not installed.
try:
    from tqdm import tqdm
except ImportError as err:
    def tqdm(x):
        return x
import argparse
import sys
import os
import re
import warnings
import params
# sys.path.append("../")
import dirs
# Make sure the output directory for the computed distributions exists.
if not os.path.exists(dirs.data_dir + "distributions/"):
    os.makedirs(dirs.data_dir + "distributions/")
# The code in principle is parallelised, but I wouldn't recommend it...
USING_MPI = False
try:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    MPI_size = comm.Get_size()
    MPI_rank = comm.Get_rank()
    if MPI_size > 1:
        USING_MPI = True
except ImportError as err:
    print(" mpi4py module not found: using a single process only...")
    USING_MPI = False
    MPI_size = 1
    MPI_rank = 0
print(MPI_size, MPI_rank)
# Promote warnings to exceptions so numerical issues fail loudly.
warnings.filterwarnings("error")
# This mass corresponds roughly to an axion decay
# constant of 3e11 and a confinement scale of Lambda = 0.076
in_maeV = params.m_a # axion mass in eV
in_gg = -0.7  # power-law index (gamma) of the AMC mass function
print("> Using m_a = %.2e eV, gamma = %.2f" % (in_maeV, in_gg))
######################
######  OPTIONS  #####

def _str2bool(v):
    """argparse converter for boolean options.

    The original code used ``type=bool``, but ``bool("False")`` is True, so
    any non-empty string (including the literal "False") enabled the option.
    Accept the common true/false spellings explicitly and reject anything
    else with a proper argparse error.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    if v.lower() in ("no", "false", "f", "n", "0"):
        return False
    raise argparse.ArgumentTypeError("Boolean value expected, got %r" % (v,))

# Parse the arguments!
parser = argparse.ArgumentParser(description="...")
parser.add_argument(
    "-profile",
    "--profile",
    help="Density profile for AMCs - `NFW` or `PL`",
    type=str,
    default="PL",
)
parser.add_argument(
    "-unperturbed",
    "--unperturbed",
    help="Calculate for unperturbed profiles?",
    type=_str2bool,  # BUG FIX: was `type=bool`, which parsed "False" as True
    default=False,
)
parser.add_argument(
    "-max_rows",
    "--max_rows",
    help="Maximum number of rows to read from each file?",
    type=int,
    default=None,
)
parser.add_argument(
    "-circ",
    "--circular",
    dest="circular",
    action="store_true",
    help="Use the circular flag to force e = 0 for all orbits.",
)
parser.add_argument(
    "-AScut",
    "--AScut",
    dest="AScut",
    action="store_true",
    help="Include an axion star cut on the AMC properties.",
)
parser.add_argument(
    "-mass_choice",
    "--mass_choice",
    help="Mass parameter = 'c' or 'a' for characteristic or average.",
    type=str,
    default="c",
)
parser.set_defaults(circular=False)
parser.set_defaults(AScut=False)
args = parser.parse_args()
# Unpack parsed command-line options into module-level flags.
UNPERTURBED = args.unperturbed
PROFILE = args.profile
CIRCULAR = args.circular
AS_CUT = args.AScut
max_rows = args.max_rows
MASS_CHOICE = args.mass_choice
# Filename suffixes encoding the chosen options.
circ_text = ""
if CIRCULAR:
    circ_text = "_circ"
cut_text = ""
if AS_CUT:
    print("> Calculating with axion-star cut...")
    cut_text = "_AScut"
# Reference AMC mass M0: characteristic ('c') or average ('a') of the
# power-law mass function.
if MASS_CHOICE.lower() == "c":
    M0 = mass_function.calc_Mchar(in_maeV)
elif MASS_CHOICE.lower() == "a":
    AMC_MF = mass_function.PowerLawMassFunction(m_a=in_maeV, gamma=in_gg)
    M0 = AMC_MF.mavg
else:
    # BUG FIX: previously an unrecognised choice fell through silently,
    # leaving M0 undefined and raising a NameError much later.
    raise ValueError("Unknown mass_choice %r; expected 'c' or 'a'." % (MASS_CHOICE,))
if PROFILE == "NFW" and UNPERTURBED == False:
    M0 = mass_function.mass_after_stripping(M0)
# Mass function
if PROFILE == "PL" or UNPERTURBED == True:
    AMC_MF = mass_function.PowerLawMassFunction(m_a=in_maeV, gamma=in_gg)
elif PROFILE == "NFW":
    AMC_MF = mass_function.StrippedPowerLawMassFunction(m_a=in_maeV, gamma=in_gg)
M_cut = 1e-29  # lower AMC mass cut-off (M_sun) -- TODO confirm units
# IDstr = "_ma_57mueV"
IDstr = params.IDstr
IDstr += "_delta_" + MASS_CHOICE.lower()
Nbins_mass = 300
Nbins_radius = 500  # Previously 500
# How much smaller than the local DM density
# do we care about?
k = params.min_enhancement
# Axion-star (AS) cut: radius of the axion star for a given AMC mass.
def r_AS(M_AMC):
    """Axion-star radius for an AMC of mass ``M_AMC``.

    Scales as 1/m_a and as M_AMC^(-1/3); ``in_maeV`` is the module-level
    axion mass in eV.
    """
    m_22 = in_maeV / 1e-22
    prefactor = 1e3 * (1.6 / m_22)
    return prefactor * (M_AMC / 1e9) ** (-1 / 3)
alpha_AS = r_AS(1.0)  # normalisation: r_AS at unit mass
k_AMC = (3 / (4 * np.pi)) ** (1 / 3)  # radius = k_AMC * (M/rho)^(1/3)
# Helper for MPI stuff
def MPI_send_chunks(data, dest, tag):
    """Send a (possibly large) numpy array over MPI in pieces.

    mpi4py's pickle-based ``send`` is unreliable for very large messages,
    so the array shape is sent first, then the flattened data in chunks of
    at most 1e6 elements. The receiver must call ``MPI_recv_chunks`` with
    the matching source/tag.

    Parameters
    ----------
    data : np.ndarray
        Array to transmit.
    dest : int
        Destination MPI rank.
    tag : int
        MPI message tag (same tag used for shape and all chunks).
    """
    data_shape = data.shape
    comm.send(data_shape, dest, tag)
    data_flat = data.flatten()
    # Split the data into N_chunks, each of maximum length 1e6.
    # BUG FIX: guard against empty arrays -- np.array_split raises
    # ValueError when asked for 0 sections.
    data_len = len(data_flat)
    N_chunks = max(1, int(np.ceil(data_len / 1e6)))
    chunk_indices = np.array_split(np.arange(data_len), N_chunks)
    print("Source:", data_len, N_chunks)
    # Loop over the chunks and send
    for inds in chunk_indices:
        comm.send(data_flat[inds], dest, tag)
    return None
def MPI_recv_chunks(source, tag):
    """Receive an array sent with ``MPI_send_chunks``.

    First receives the array shape, then the flattened data in chunks of at
    most 1e6 elements (the chunking must mirror the sender's exactly), and
    finally reshapes to the original shape.

    Parameters
    ----------
    source : int
        Source MPI rank.
    tag : int
        MPI message tag used by the sender.

    Returns
    -------
    np.ndarray
        The reassembled array.
    """
    data_shape = comm.recv(source=source, tag=tag)
    data_flat = np.zeros(data_shape).flatten()
    # Split the data into N_chunks, each of maximum length 1e6.
    # BUG FIX: guard against empty arrays -- np.array_split raises
    # ValueError when asked for 0 sections (keep in sync with the sender).
    data_len = len(data_flat)
    N_chunks = max(1, int(np.ceil(data_len / 1e6)))
    print("Dest:", data_len, N_chunks)
    chunk_indices = np.array_split(np.arange(data_len), N_chunks)
    # Loop over the chunks and receive
    for inds in chunk_indices:
        data_flat[inds] = comm.recv(source=source, tag=tag)
    data = np.reshape(data_flat, data_shape)
    return data
def main():
a_grid = None
if MPI_rank == 0:
# Gather the list of files to be used, then loop over semi-major axis a
ff1 = glob.glob(
dirs.montecarlo_dir + "AMC_logflat_*" + PROFILE + circ_text + ".txt"
)
a_grid = np.zeros(len(ff1))
print(dirs.montecarlo_dir)
for i, fname in enumerate(ff1):
# print(fname)
m = re.search("AMC_logflat_a=(.+?)_" + PROFILE + circ_text + ".txt", fname)
if m:
a_string = m.group(1)
a_grid[i] = float(a_string) * 1.0e3 # conversion to pc
a_grid = np.sort(a_grid)
print(len(a_grid))
print(a_grid)
if USING_MPI: # Tell all processes about the list, a_grid
a_grid = comm.bcast(a_grid, root=0)
# Edges to use for the output bins in R (galactocentric radius, pc)
if CIRCULAR:
R_centres = 1.0 * a_grid
else:
R_bin_edges = np.geomspace(0.05e3, 60e3, 65)
R_centres = np.sqrt(R_bin_edges[:-1] * R_bin_edges[1:])
mass_ini_all, mass_all, radius_all, e_all, a_all = load_AMC_results(a_grid)
# ----------------------------
# Re-weight the samples according to radius
if CIRCULAR:
(
AMC_weights,
AMC_weights_surv,
AMC_weights_masscut,
AMC_weights_AScut,
AMC_weights_AScut_masscut,
) = calculate_weights_circ(
a_grid, a_all, e_all, mass_all, mass_ini_all, radius_all
)
else:
(
AMC_weights,
AMC_weights_surv,
AMC_weights_masscut,
AMC_weights_AScut,
AMC_weights_AScut_masscut,
) = calculate_weights(
R_bin_edges, a_grid, a_all, e_all, mass_all, mass_ini_all, radius_all
) # Just pass the eccentricities and semi major axes
if USING_MPI:
comm.barrier()
if MPI_rank != 0:
comm.send(mass_ini_all, dest=0, tag=(10 * MPI_rank + 1))
comm.send(mass_all, dest=0, tag=(10 * MPI_rank + 2))
comm.send(radius_all, dest=0, tag=(10 * MPI_rank + 3))
comm.send(a_all, dest=0, tag=(10 * MPI_rank + 4))
comm.send(e_all, dest=0, tag=(10 * MPI_rank + 5))
# print(AMC_weights.shape)
# print(sys.getsizeof(AMC_weights))
# comm.send(AMC_weights.shape, dest=0,tag= (10*MPI_rank+6) )
# print("MPI_rank : ...")
# comm.Send(AMC_weights, dest=0, tag= (10*MPI_rank+7) )
MPI_send_chunks(AMC_weights, dest=0, tag=(10 * MPI_rank + 7))
MPI_send_chunks(AMC_weights_surv, dest=0, tag=(10 * MPI_rank + 9))
# comm.send(AMC_weights_surv, dest=0, tag= (10*MPI_rank+9) )
# print(MPI_rank)
# https://stackoverflow.com/questions/15833947/mpi-hangs-on-mpi-send-for-large-messages
if MPI_rank == 0:
for i in range(1, MPI_size):
mass_ini_tmp = comm.recv(source=i, tag=(10 * i + 1))
mass_tmp = comm.recv(source=i, tag=(10 * i + 2))
radius_tmp = comm.recv(source=i, tag=(10 * i + 3))
a_tmp = comm.recv(source=i, tag=(10 * i + 4))
e_tmp = comm.recv(source=i, tag=(10 * i + 5))
# req = comm.irecv(source=i, tag= (10*i+7) )
# comm.Recv(AMC_w_tmp, source=i, tag= (10*i+7) )
AMC_w_tmp = MPI_recv_chunks(source=i, tag=(10 * i + 7))
# AMC_w_surv_tmp = comm.recv(source=i, tag= (10*i+9) )
AMC_w_surv_tmp = MPI_recv_chunks(source=i, tag=(10 * i + 9))
mass_ini_all = np.concatenate((mass_ini_all, mass_ini_tmp))
mass_all = np.concatenate((mass_all, mass_tmp))
radius_all = np.concatenate((radius_all, radius_tmp))
a_all = np.concatenate((a_all, a_tmp))
e_all = np.concatenate((e_all, e_tmp))
AMC_weights = np.concatenate((AMC_weights, AMC_w_tmp))
AMC_weights_surv = np.concatenate((AMC_weights_surv, AMC_w_surv_tmp))
comm.barrier()
# quit()
if MPI_rank == 0:
# Calculate the survival probability as a function of a
psurv_a_list, psurv_a_AScut_list = calculate_survivalprobability(
a_grid, a_all, mass_all, mass_ini_all, radius_all
)
P_r_weights = np.sum(
AMC_weights, axis=0
) # Check if this should be a sum or integral
P_r_weights_surv = np.sum(AMC_weights_surv, axis=0)
P_r_weights_masscut = np.sum(AMC_weights_masscut, axis=0)
P_r_weights_AScut = np.sum(AMC_weights_AScut, axis=0)
P_r_weights_AScut_masscut = | np.sum(AMC_weights_AScut_masscut, axis=0) | numpy.sum |
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
    """Tests for ``iaa.Add``: image-wide addition of a constant or
    stochastic value, optionally sampled per channel, across dtypes."""
    def setUp(self):
        reseed()
    def test___init___bad_datatypes(self):
        # test exceptions for wrong parameter types
        got_exception = False
        try:
            _ = iaa.Add(value="test")
        except Exception:
            got_exception = True
        assert got_exception
        got_exception = False
        try:
            _ = iaa.Add(value=1, per_channel="test")
        except Exception:
            got_exception = True
        assert got_exception
    def test_add_zero(self):
        # no add, shouldnt change anything
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Add(value=0)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
        assert observed.shape == (1, 3, 3, 1)
        observed = aug.augment_images(images_list)
        expected = images_list
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = images_list
        assert array_equal_lists(observed, expected)
    def test_add_one(self):
        # add > 0
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Add(value=1)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images + 1
        assert np.array_equal(observed, expected)
        assert observed.shape == (1, 3, 3, 1)
        observed = aug.augment_images(images_list)
        expected = [images_list[0] + 1]
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images + 1
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = [images_list[0] + 1]
        assert array_equal_lists(observed, expected)
    def test_minus_one(self):
        # add < 0
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Add(value=-1)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images - 1
        assert np.array_equal(observed, expected)
        observed = aug.augment_images(images_list)
        expected = [images_list[0] - 1]
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images - 1
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = [images_list[0] - 1]
        assert array_equal_lists(observed, expected)
    def test_uint8_every_possible_value(self):
        # uint8, every possible addition for base value 127
        # (result must clip into [0, 255] for both scalar types)
        for value_type in [float, int]:
            for per_channel in [False, True]:
                for value in np.arange(-255, 255+1):
                    aug = iaa.Add(value=value_type(value), per_channel=per_channel)
                    expected = np.clip(127 + value_type(value), 0, 255)
                    img = np.full((1, 1), 127, dtype=np.uint8)
                    img_aug = aug.augment_image(img)
                    assert img_aug.item(0) == expected
                    img = np.full((1, 1, 3), 127, dtype=np.uint8)
                    img_aug = aug.augment_image(img)
                    assert np.all(img_aug == expected)
    def test_add_floats(self):
        # specific tests with floats
        # (fractional additions are rounded: 0.75 rounds up, 0.45 down)
        aug = iaa.Add(value=0.75)
        img = np.full((1, 1), 1, dtype=np.uint8)
        img_aug = aug.augment_image(img)
        assert img_aug.item(0) == 2
        img = np.full((1, 1), 1, dtype=np.uint16)
        img_aug = aug.augment_image(img)
        assert img_aug.item(0) == 2
        aug = iaa.Add(value=0.45)
        img = np.full((1, 1), 1, dtype=np.uint8)
        img_aug = aug.augment_image(img)
        assert img_aug.item(0) == 1
        img = np.full((1, 1), 1, dtype=np.uint16)
        img_aug = aug.augment_image(img)
        assert img_aug.item(0) == 1
    def test_stochastic_parameters_as_value(self):
        # test other parameters
        # (sampled values must stay within each distribution's support)
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
        observed = aug.augment_images(images)
        assert 100 + 1 <= np.average(observed) <= 100 + 10
        aug = iaa.Add(value=iap.Uniform(1, 10))
        observed = aug.augment_images(images)
        assert 100 + 1 <= np.average(observed) <= 100 + 10
        aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
        observed = aug.augment_images(images)
        assert 100 - 3 <= np.average(observed) <= 100 + 3
        aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
        observed = aug.augment_images(images)
        assert 100 - 3 <= np.average(observed) <= 100 + 3
    def test_keypoints_dont_change(self):
        # keypoints shouldnt be changed
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
                                          ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
        aug = iaa.Add(value=1)
        aug_det = iaa.Add(value=1).to_deterministic()
        observed = aug.augment_keypoints(keypoints)
        expected = keypoints
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(keypoints)
        expected = keypoints
        assert keypoints_equal(observed, expected)
    def test_tuple_as_value(self):
        # varying values
        # (stochastic aug must resample between calls; deterministic must not)
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        aug = iaa.Add(value=(0, 10))
        aug_det = aug.to_deterministic()
        last_aug = None
        last_aug_det = None
        nb_changed_aug = 0
        nb_changed_aug_det = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug = observed_aug
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug = observed_aug
                last_aug_det = observed_aug_det
        assert nb_changed_aug >= int(nb_iterations * 0.7)
        assert nb_changed_aug_det == 0
    def test_per_channel(self):
        # test channelwise
        # (with per_channel=True both candidate values should appear
        # across 100 channels)
        aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
        observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
        uq = np.unique(observed)
        assert observed.shape == (1, 1, 100)
        assert 0 in uq
        assert 1 in uq
        assert len(uq) == 2
    def test_per_channel_with_probability(self):
        # test channelwise with probability
        # (per_channel=0.5 should trigger channelwise sampling ~half the time)
        aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
        seen = [0, 0]
        for _ in sm.xrange(400):
            observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
            assert observed.shape == (1, 1, 20)
            uq = np.unique(observed)
            per_channel = (len(uq) == 2)
            if per_channel:
                seen[0] += 1
            else:
                seen[1] += 1
        assert 150 < seen[0] < 250
        assert 150 < seen[1] < 250
    def test_zero_sized_axes(self):
        # images with a zero-sized axis must pass through unchanged in shape
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.Add(1)
                image_aug = aug(image=image)
                assert np.all(image_aug == 1)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == image.shape
    def test_unusual_channel_numbers(self):
        # channel counts other than 1/3/4 must also be handled
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.Add(1)
                image_aug = aug(image=image)
                assert np.all(image_aug == 1)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == image.shape
    def test_get_parameters(self):
        # test get_parameters()
        aug = iaa.Add(value=1, per_channel=False)
        params = aug.get_parameters()
        assert isinstance(params[0], iap.Deterministic)
        assert isinstance(params[1], iap.Deterministic)
        assert params[0].value == 1
        assert params[1].value == 0
    def test_heatmaps(self):
        # test heatmaps (not affected by augmenter)
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        aug = iaa.Add(value=10)
        hm = ia.quokka_heatmap()
        hm_aug = aug.augment_heatmaps([hm])[0]
        assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
    def test_other_dtypes_bool(self):
        # bool: results saturate into {False, True}
        image = np.zeros((3, 3), dtype=bool)
        aug = iaa.Add(value=1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 1)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.Add(value=1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 1)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.Add(value=-1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 0)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.Add(value=-2)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 0)
    def test_other_dtypes_uint_int(self):
        # uint/int: additions must clip at the dtype's value range and
        # stochastic values must broadcast (per-image) or vary (per-channel)
        for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
            min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
            image = np.full((3, 3), min_value, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 21)
            image = np.full((3, 3), max_value - 2, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value - 1)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.Add(2)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-9)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-10)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value)
            for _ in sm.xrange(10):
                image = np.full((1, 1, 3), 20, dtype=dtype)
                aug = iaa.Add(iap.Uniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) == 1
                image = np.full((1, 1, 100), 20, dtype=dtype)
                aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
                image = np.full((1, 1, 3), 20, dtype=dtype)
                aug = iaa.Add(iap.DiscreteUniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) == 1
                image = np.full((1, 1, 100), 20, dtype=dtype)
                aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
    def test_other_dtypes_float(self):
        # float
        # (tolerances scale with the dtype's value range; float16 is coarser)
        for dtype in [np.float16, np.float32]:
            min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
            if dtype == np.float16:
                atol = 1e-3 * max_value
            else:
                atol = 1e-9 * max_value
            _allclose = functools.partial(np.allclose, atol=atol, rtol=0)
            image = np.full((3, 3), min_value, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 21)
            image = np.full((3, 3), max_value - 2, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value - 1)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.Add(2)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-9)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-10)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value)
            for _ in sm.xrange(10):
                image = np.full((50, 1, 3), 0, dtype=dtype)
                aug = iaa.Add(iap.Uniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
                assert np.allclose(image_aug[..., 0], image_aug[..., 1])
                image = np.full((1, 1, 100), 0, dtype=dtype)
                aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
                image = np.full((50, 1, 3), 0, dtype=dtype)
                aug = iaa.Add(iap.DiscreteUniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
                assert np.allclose(image_aug[..., 0], image_aug[..., 1])
                image = np.full((1, 1, 100), 0, dtype=dtype)
                aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
    def test_pickleable(self):
        # augmenter must survive a pickle roundtrip with identical
        # RNG-driven outputs (checked by the shared helper)
        aug = iaa.Add((0, 50), per_channel=True, random_state=1)
        runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldnt change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldnt be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
    def test_other_dtypes_uint_int(self):
        # uint/int dtypes: exact saturating-add behaviour at the dtype limits.
        for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
            min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
            # plain additions near the lower bound
            image = np.full((3, 3), min_value, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 21)
            # additions near the upper bound must saturate at max_value
            image = np.full((3, 3), max_value - 2, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value - 1)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.AddElementwise(2)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value)
            # subtractions near the lower bound must saturate at min_value
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-9)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-10)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value)
            # stochastic values: results stay in [10, 30], vary spatially,
            # and agree across channels unless per_channel is enabled
            for _ in sm.xrange(10):
                image = np.full((5, 5, 3), 20, dtype=dtype)
                aug = iaa.AddElementwise(iap.Uniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
                assert np.all(image_aug[..., 0] == image_aug[..., 1])
                image = np.full((1, 1, 100), 20, dtype=dtype)
                aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
                image = np.full((5, 5, 3), 20, dtype=dtype)
                aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
                assert np.all(image_aug[..., 0] == image_aug[..., 1])
                image = np.full((1, 1, 100), 20, dtype=dtype)
                aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
    def test_other_dtypes_float(self):
        # float16/float32: same additions as the integer test, checked with
        # a dtype-dependent absolute tolerance.
        for dtype in [np.float16, np.float32]:
            min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
            # float16 has far less precision than float32, so widen atol
            if dtype == np.float16:
                atol = 1e-3 * max_value
            else:
                atol = 1e-9 * max_value
            _allclose = functools.partial(np.allclose, atol=atol, rtol=0)
            # plain additions near the lower bound
            image = np.full((3, 3), min_value, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 21)
            # additions near the upper bound must saturate at max_value
            image = np.full((3, 3), max_value - 2, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value - 1)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.AddElementwise(2)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value)
            # subtractions near the lower bound must saturate at min_value
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-9)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-10)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value)
            # stochastic values: bounded, vary along rows, and agree across
            # channels unless per_channel is enabled
            for _ in sm.xrange(10):
                image = np.full((50, 1, 3), 0, dtype=dtype)
                aug = iaa.AddElementwise(iap.Uniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
                assert np.allclose(image_aug[..., 0], image_aug[..., 1])
                image = np.full((1, 1, 100), 0, dtype=dtype)
                aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
                image = np.full((50, 1, 3), 0, dtype=dtype)
                aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
                assert np.allclose(image_aug[..., 0], image_aug[..., 1])
                image = np.full((1, 1, 100), 0, dtype=dtype)
                aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
    def test_pickleable(self):
        # Augmenter must survive a pickle round-trip with identical outputs.
        aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
        runtest_pickleable_uint8_img(aug, iterations=2)
class AdditiveGaussianNoise(unittest.TestCase):
    def setUp(self):
        # Reset the library RNG state so every test is deterministic.
        reseed()
def test_loc_zero_scale_zero(self):
# no noise, shouldnt change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not | np.array_equal(observed, images) | numpy.array_equal |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 05:44:13 2019
@author: alan_
"""
### Apply the functions developed in part 1 to the recorded signals.
import numpy as np
from matplotlib import pyplot as plt
from scipy import signal as sig
from scipy import fftpack as fft
from scipy.signal import chebwin
def PSD(matrix):
    """Return (F, mag_sxx): frequency axis and mean PSD in dB of the rows.

    Each row of ``matrix`` is windowed with a 100 dB Dolph-Chebyshev window,
    FFT'd, and the squared magnitudes of the positive-frequency half are
    averaged over rows before conversion to dB.

    NOTE(review): relies on module-level globals ``ts`` (sample period) and
    ``fs`` (sample rate) -- confirm they are defined before calling.
    """
    N1=np.size(matrix,1)  # samples per segment (columns)
    N2=np.size(matrix,0)  # number of segments (rows)
    #rxx=sig.convolve2d(matrix,matrix,mode='same')
    rxx=matrix
    # apply the Chebyshev window to every row
    rxx=rxx*(chebwin(N1,at=100).reshape(1,N1)*np.ones((N2,1)))
    sxx=np.fft.fft(rxx,axis=1)
    # keep positive frequencies only, scale by the sample period, square
    mag_sxx=(np.abs(sxx[:,0:N1//2])*ts)**2
    mag_sxx=10*np.log10(np.mean(mag_sxx,0))
    F=np.linspace(0,fs//2,len(mag_sxx))
    return F,mag_sxx
def Periodograma(p,v):
    """Averaged (Welch-style) periodogram of signal ``p`` using ``v`` segments.

    Zero-pads ``p`` so its length is divisible by ``v``, reshapes it into a
    (v, len//v) matrix, estimates the PSD via :func:`PSD` and plots the first
    quarter of the frequency axis.

    NOTE(review): relies on module-level globals ``lavel``, ``j``, ``senal``
    and ``i`` for plot labelling -- confirm they exist before calling.
    """
    N=len(p)
    if N % v!=0:
        Nzeros=v-(N % v)
        x=np.append(p,np.zeros(Nzeros)) # zero-pad so the reshape below is valid (also improves the estimate)
    else:
        x=p
    Nv=len(x)//v
    matrix=x.reshape(v,Nv)
    F,sxx=PSD(matrix)
    # plot only the first quarter of the frequency axis
    plt.plot(F[0:len(F)//4],sxx[0:len(F)//4],label=lavel[j])
    legend = plt.legend(loc='upper right', shadow=True, fontsize='small')
    legend.get_frame().set_facecolor('pink')
    plt.xlabel('Hz')
    plt.ylabel('dBs')
    plt.title(senal[i])
    plt.grid()
def RF(den, num, fm):
    """Return the frequency response of a digital filter.

    Parameters
    ----------
    den : array_like
        First argument to ``scipy.signal.freqz`` (numerator ``b``).
    num : array_like
        Second argument to ``scipy.signal.freqz`` (denominator ``a``).
    fm : float
        Sampling frequency in Hz; the returned axis spans [0, fm//2].

    Returns
    -------
    w : ndarray
        Frequency axis in Hz.
    W : ndarray
        Unwrapped phase response in degrees.
    H : ndarray
        Magnitude response in dB.
    """
    w, h = sig.freqz(den, num)
    # avoid log10(0) where the response is exactly zero
    h[h == 0] = 1E-5
    H = 20 * np.log10(np.abs(h))
    W = np.angle(h)
    W = np.unwrap(W)
    W = np.degrees(W)
    # BUG FIX: the frequency axis previously used the module-level global
    # ``fs`` and silently ignored the ``fm`` parameter; use the argument.
    w = np.linspace(0, fm // 2, H.shape[0])
    return w, W, H
def pseudo_inv(A):
    """Return the TRANSPOSE of the Moore-Penrose pseudo-inverse of ``A``.

    With numpy's SVD ``A = U @ diag(Sigma) @ V`` (``V`` is already the
    transposed right-singular matrix), the true pseudo-inverse is
    ``V.T @ diag(1/Sigma) @ U.T``; this function returns
    ``U @ diag(1/Sigma) @ V``, i.e. its transpose.  ``inv_fitting`` relies on
    exactly this orientation (it right-multiplies a row vector by the
    result), so do not "fix" the orientation without changing the caller.
    """
    U, Sigma, V = np.linalg.svd(A, full_matrices=False,compute_uv=True)
    # reciprocal singular values; NOTE(review): no thresholding of tiny
    # singular values -- assumes A is well conditioned
    Sigma_pseu=1/Sigma
    inv=np.matrix(U)*np.diag(Sigma_pseu)*np.matrix(V)
    return inv
def inv_fitting(t, y):
    """Least-squares fit of a straight line ``y = m*t + b``.

    Builds the design matrix ``[t, 1]`` and solves through the SVD-based
    pseudo-inverse.  Returns a 1x2 matrix ``[[m, b]]``.
    """
    design = np.array([t, np.ones(len(t))]).T
    inv = pseudo_inv(design)
    # row vector times (A^+)^T yields the transposed least-squares solution
    solution = np.dot(y.reshape(1, len(y)), inv)
    return solution
def detrend(y,v):
    """Piecewise-linear detrend: split ``y`` into ``v`` segments, subtract a
    least-squares line from each, and return the concatenated residuals.

    NOTE(review): only complete segments of length ``len(y)//v`` are
    processed, so up to ``len(y) % v`` trailing samples are dropped.
    """
    N=len(y)
    L=N//v  # segment length
    ind=0   # start index of the current segment
    ydet=[]
    for i in range(N):
        if (i+1) % L == 0:
            # fit a line to this segment and subtract it
            t=np.arange(0,L)
            fit=inv_fitting(t,y[ind:i+1])
            ydet.append(y[ind:i+1]-(t*fit[0,0]+fit[0,1]))
            ind=i+1
    ydet = np.ndarray.flatten(np.array(ydet))
    return ydet
def barlett_par(v):
    """Return a triangular (Bartlett-like) window of length ``v``.

    Rises as ``2*n/v`` over the first half and falls as ``2 - 2*n/v`` over
    the second half.  NOTE: for odd ``v`` the peak sample exceeds 1.
    """
    n = np.arange(v)
    rising = 2 * n[: v // 2] / v
    falling = 2 - 2 * n[v // 2 :] / v
    return np.append(rising, falling)
def MAverage(x,N):
L=len(x)-N
y= | np.zeros(L) | numpy.zeros |
#!/usr/bin/env python
import argparse
import ast
import numpy as np
import re
import torch
import torch.nn as nn
import torch.utils.data as data
from pathlib import Path
from torchvision import datasets, transforms
from typing import Dict, List, Optional, Tuple
ParamDict = Dict[str, np.ndarray]
class PytorchReshape(nn.Module):
    """Module that reshapes its input to ``(-1,) + shape``.

    The leading ``-1`` preserves the batch dimension while the remaining
    axes are forced to ``shape``.
    """

    def __init__(self, shape):
        super().__init__()
        self.shape = (-1,) + tuple(shape)

    def forward(self, x):
        # reshape() copies only for non-contiguous tensors, matching the
        # original contiguous().view() combination in effect.
        return x.reshape(self.shape)
class PytorchTranspose(nn.Module):
    """Permute the non-batch dimensions; dim 0 (batch) always stays first."""

    def __init__(self, *dims):
        super().__init__()
        # shift user-specified dims by one to account for the batch axis
        self.dims = (0,) + tuple(axis + 1 for axis in dims)

    def forward(self, x):
        return x.permute(self.dims)
class PytorchParSum(nn.Module):
    """Run two sequential branches on the same input and sum their outputs."""

    def __init__(self, par_sum_1, par_sum_2):
        super().__init__()
        # each branch is a list of modules, wrapped into a Sequential
        self.par_sum_1 = nn.Sequential(*par_sum_1)
        self.par_sum_2 = nn.Sequential(*par_sum_2)

    def forward(self, x):
        return self.par_sum_1(x) + self.par_sum_2(x)
def _parse_args():
    """Build and parse the command-line arguments for the converter.

    Help strings and defaults are user-visible behaviour; input_shape is
    given in CHW order (default: MNIST 1x28x28).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "eran_network", type=Path, help="path to the ERAN network to convert"
    )
    parser.add_argument(
        "-o",
        "--output",
        type=Path,
        default=Path("model.onnx"),
        help="path to save the ONNX model",
    )
    parser.add_argument(
        "--input_shape",
        type=int,
        nargs="+",
        default=[1, 28, 28],
        help="the input shape to the network (in CHW format)",
    )
    parser.add_argument(
        "--drop_normalization",
        action="store_true",
        help="do not include any input normalization in the converted model",
    )
    parser.add_argument(
        "--check_cifar_accuracy",
        action="store_true",
        help="evaluate the converted model on the CIFAR10 test set",
    )
    parser.add_argument(
        "--check_mnist_accuracy",
        action="store_true",
        help="evaluate the converted model on the MNIST test set",
    )
    return parser.parse_args()
def parse_layer_params(param_str: str) -> ParamDict:
    """Split ``"key1=val1, key2=val2"`` into a dict of numpy arrays.

    Values are parsed with ``ast.literal_eval`` so lists/numbers in the ERAN
    text format become Python objects, then wrapped in ``np.array``.
    """
    pattern = re.compile(r"([a-zA-Z_]+?=.+?),? [a-zA-Z]")
    raw_pairs = []
    remainder = param_str
    while True:
        match = re.match(pattern, remainder)
        if match is None:
            # no separator left: the rest is the final key=value pair
            raw_pairs.append(remainder)
            break
        raw_pairs.append(match.group(1))
        # restart just before the look-ahead letter consumed by the match
        remainder = remainder[match.end() - 1 :]
    parsed = {}
    for pair in raw_pairs:
        key, value = pair.split("=")
        parsed[key] = np.array(ast.literal_eval(value))
    return parsed
def build_normalize(
    parameters: ParamDict, input_shape: List[int], output_shape: List[int]
) -> nn.Module:
    """Build a 1x1 convolution applying ``(x - mean) / std`` per channel.

    Side effect: extends ``output_shape`` with ``input_shape``, since the
    layer does not change the tensor shape.
    """
    output_shape.extend(input_shape)
    num_channels = input_shape[0]
    # (x - mean) / std  ==  x * (1/std) + (-mean/std)
    scale = np.diag(1.0 / parameters["std"])
    offset = -parameters["mean"] / parameters["std"]
    norm_layer = nn.Conv2d(num_channels, num_channels, 1, 1)
    norm_layer.weight.data = torch.from_numpy(
        scale.reshape(num_channels, num_channels, 1, 1)
    ).float()
    norm_layer.bias.data = torch.from_numpy(offset).float()
    return norm_layer
def build_linear(
weights: np.ndarray,
bias: np.ndarray,
activation: str,
input_shape: List[int],
output_shape: List[int],
) -> nn.Module:
flat_input_size = np.product(input_shape)
output_shape.append(bias.shape[0])
flat_output_size = | np.product(output_shape) | numpy.product |
import numpy as np
import numba
from numba import jit
NumericThresh = 1E-150
LogNumericThresh = np.log(NumericThresh)
EigenValueThresh = 1E-10
@jit(nopython=True)
def sample_variance(zero_mean_data_array,norm):
    """
    Compute the variance of a zero meaned array. Divide by normalization factor.
    zero_mean_data_array (required) : float64 array of data
    norm (required) : float64 value to divide variance by - supplied so one can substract appropriate values etc from normalization
    Returns the float64 sum of squared values divided by ``norm``.
    """
    # meta data from array
    nDataPoints = zero_mean_data_array.shape[0]
    # zero variance
    var = np.float64(0.0)
    # compute sum of variances over data
    for i in range(nDataPoints):
        var += zero_mean_data_array[i]**2
    # returned averaged variance
    return var/norm
@jit(nopython=True)
def weight_kabsch_dist_align(x1, x2, weights):
    """
    Compute the Mahalabonis distance between positions x1 and x2 after aligned x1 to x2 given Kabsch weights (inverse variance)
    x1 (required) : float64 array with dimensions (n_atoms,3) of one molecular configuration
    x2 (required) : float64 array with dimensions (n_atoms,3) of another molecular configuration
    weights (required) : float64 matrix with dimensions (n_atoms, n_atoms) of inverse (n_atoms, n_atoms) covariance
    Returns the squared Mahalanobis distance (float64).
    """
    # rotate x1 to x2 given Kabsch weights
    x1_prime = weight_kabsch_rotate(x1, x2, weights)
    # zero distance
    dist = 0.0
    # compute distance as sum over indepdent (because covar is n_atoms x n_atoms) dimensions
    for i in range(3):
        disp = x1_prime[:,i] - x2[:,i]
        dist += np.dot(disp,np.dot(weights,disp))
    # return distance - this is actually the squared Mahalabonis distance
    return dist
@jit(nopython=True)
def weight_kabsch_dist(x1, x2, weights):
    """
    Compute the Mahalabonis distance between positions x1 and x2 given Kabsch weights (inverse variance)
    x1 (required) : float64 array with dimensions (n_atoms,3) of one molecular configuration
    x2 (required) : float64 array with dimensions (n_atoms,3) of another molecular configuration
    weights (required) : float64 matrix with dimensions (n_atoms, n_atoms) of inverse (n_atoms, n_atoms) covariance
    Returns the squared Mahalanobis distance (float64); no alignment is done.
    """
    # zero distance
    dist = 0.0
    # compute distance as sum over indepdent (because covar is n_atoms x n_atoms) dimensions
    for i in range(3):
        disp = x1[:,i] - x2[:,i]
        dist += np.dot(disp,np.dot(weights,disp))
    # return value
    return dist
@jit(nopython=True)
def pseudo_lpdet_inv(sigma):
    """Return (lpdet, precision, rank) for a symmetric matrix ``sigma``.

    lpdet : log pseudo-determinant (sum of logs of eigenvalues above
        ``EigenValueThresh``)
    precision : pseudo-inverse assembled from the retained eigenpairs
    rank : number of retained eigenvalues
    """
    N = sigma.shape[0]
    e, v = np.linalg.eigh(sigma)
    precision = np.zeros(sigma.shape,dtype=np.float64)
    lpdet = 0.0
    rank = 0
    for i in range(N):
        # skip near-zero eigenvalues (null space) for numerical stability
        if (e[i] > EigenValueThresh):
            lpdet += np.log(e[i])
            precision += 1.0/e[i]*np.outer(v[:,i],v[:,i])
            rank += 1
    return lpdet, precision, rank
@jit(nopython=True)
def lpdet_inv(sigma):
    """Return the NEGATIVE log pseudo-determinant of ``sigma``.

    Eigenvalues below ``EigenValueThresh`` are skipped.  Because of the
    flipped sign, passing a precision matrix yields the log
    pseudo-determinant of the corresponding covariance matrix, which is how
    ``intermediate_kabsch_log_lik`` uses it.
    """
    N = sigma.shape[0]
    e, v = np.linalg.eigh(sigma)
    lpdet = 0.0
    for i in range(N):
        if (e[i] > EigenValueThresh):
            lpdet -= np.log(e[i])
    return lpdet
@jit(nopython=True)
def uniform_kabsch_log_lik(x, mu):
    """Gaussian log likelihood of trajectory ``x`` about mean ``mu`` under a
    single isotropic variance estimated from the same data.

    x : float64 array (n_frames, n_atoms, 3)
    mu : float64 array (n_atoms, 3)
    """
    # meta data
    n_frames = x.shape[0]
    n_atoms = x.shape[1]
    # compute log Likelihood for all points
    log_lik = 0.0
    sampleVar = 0.0
    for i in range(n_frames):
        for j in range(3):
            disp = x[i,:,j] - mu[:,j]
            temp = np.dot(disp,disp)
            sampleVar += temp
            log_lik += temp
    # finish variance (n_atoms-1: one COG degree of freedom removed)
    sampleVar /= (n_frames-1)*3*(n_atoms-1)
    log_lik /= sampleVar
    log_lik += n_frames * 3 * (n_atoms-1) * np.log(sampleVar)
    log_lik *= -0.5
    return log_lik
@jit(nopython=True)
def intermediate_kabsch_log_lik(x, mu, kabsch_weights):
    """Gaussian log likelihood of trajectory ``x`` about mean ``mu`` given a
    precision (Kabsch weight) matrix ``kabsch_weights``.
    """
    # meta data
    n_frames = x.shape[0]
    # determine precision and pseudo determinant
    lpdet = lpdet_inv(kabsch_weights)
    # compute log Likelihood for all points
    log_lik = 0.0
    for i in range(n_frames):
        #disp = x[i] - mu
        for j in range(3):
            disp = x[i,:,j] - mu[:,j]
            log_lik += np.dot(disp,np.dot(kabsch_weights,disp))
    log_lik += 3 * n_frames * lpdet
    log_lik *= -0.5
    return log_lik
@jit(nopython=True)
def weight_kabsch_log_lik(x, mu, precision, lpdet):
    """Gaussian log likelihood of trajectory ``x`` about mean ``mu`` given a
    precomputed precision matrix and its log pseudo-determinant ``lpdet``
    (of the covariance), e.g. from :func:`pseudo_lpdet_inv`.
    """
    # meta data
    n_frames = x.shape[0]
    # compute log Likelihood for all points
    log_lik = 0.0
    for i in range(n_frames):
        #disp = x[i] - mu
        for j in range(3):
            disp = x[i,:,j] - mu[:,j]
            log_lik += np.dot(disp,np.dot(precision,disp))
    log_lik += 3 * n_frames * lpdet
    log_lik *= -0.5
    return log_lik
@jit(nopython=True)
def weight_kabsch_rotate(mobile, target, weights):
    """Rotate ``mobile`` onto ``target`` via the weighted Kabsch algorithm.

    The optimal rotation comes from the SVD of the weighted correlation
    matrix; a reflection (negative determinant product) is corrected by
    flipping the last left-singular vector.
    """
    correlation_matrix = np.dot(np.transpose(mobile), np.dot(weights, target))
    V, S, W_tr = np.linalg.svd(correlation_matrix)
    if np.linalg.det(V) * np.linalg.det(W_tr) < 0.0:
        V[:, -1] = -V[:, -1]
    rotation = np.dot(V, W_tr)
    mobile_prime = np.dot(mobile,rotation)
    return mobile_prime
@jit(nopython=True)
def weight_kabsch_rmsd(mobile, target, weights):
    """Unweighted RMSD between ``target`` and ``mobile`` after aligning
    ``mobile`` onto ``target`` with weighted Kabsch."""
    xyz1_prime = weight_kabsch_rotate(mobile, target, weights)
    delta = xyz1_prime - target
    rmsd = (delta ** 2.0).sum(1).mean() ** 0.5
    return rmsd
@jit(nopython=True)
def rmsd_kabsch(xyz1, xyz2):
    """RMSD between ``xyz2`` and ``xyz1`` after unweighted Kabsch alignment
    of ``xyz1`` onto ``xyz2``."""
    xyz1_prime = kabsch_rotate(xyz1, xyz2)
    delta = xyz1_prime - xyz2
    rmsd = (delta ** 2.0).sum(1).mean() ** 0.5
    return rmsd
@jit(nopython=True)
def kabsch_rotate(mobile, target):
    """Rotate ``mobile`` onto ``target`` with the (unweighted) Kabsch
    algorithm.  Assumes both structures are already centered.
    """
    correlation_matrix = np.dot(np.transpose(mobile), target)
    V, S, W_tr = np.linalg.svd(correlation_matrix)
    # correct an improper rotation (reflection)
    if np.linalg.det(V) * np.linalg.det(W_tr) < 0.0:
        V[:, -1] = -V[:, -1]
    rotation = np.dot(V, W_tr)
    mobile_prime = np.dot(mobile,rotation)
    return mobile_prime
@jit(nopython=True)
def kabsch_transform(mobile, target):
    """Rotate ``mobile`` into the frame of ``target``.

    NOTE(review): the translation returned by
    ``compute_translation_and_rotation`` is computed but deliberately not
    applied (see the commented-out line) -- only the rotation is used.
    """
    translation, rotation = compute_translation_and_rotation(mobile, target)
    #mobile_prime = mobile.dot(rotation) + translation
    mobile_prime = np.dot(mobile,rotation) #+ translation
    return mobile_prime
@jit(nopython=True)
def compute_translation_and_rotation(mobile, target):
    """Return (translation, rotation) mapping ``mobile`` onto ``target``.

    Both inputs are centered internally; the rotation is the Kabsch optimum
    and ``translation`` maps the centered-rotated mobile onto the target
    centroid (``mobile @ rotation + translation ~= target``).
    """
    #meta data
    n_atoms = mobile.shape[0]
    nDim = mobile.shape[1]
    # accumulate centroids of both structures
    mu1 = np.zeros(nDim)
    mu2 = np.zeros(nDim)
    for i in range(n_atoms):
        for j in range(nDim):
            mu1[j] += mobile[i,j]
            mu2[j] += target[i,j]
    mu1 /= n_atoms
    mu2 /= n_atoms
    mobile = mobile - mu1
    target = target - mu2
    correlation_matrix = np.dot(np.transpose(mobile), target)
    V, S, W_tr = np.linalg.svd(correlation_matrix)
    #is_reflection = (np.linalg.det(V) * np.linalg.det(W_tr)) < 0.0
    if np.linalg.det(V) * np.linalg.det(W_tr) < 0.0:
        V[:, -1] = -V[:, -1]
    rotation = np.dot(V, W_tr)
    translation = mu2 - np.dot(mu1,rotation)
    return translation, rotation
# remove COG translation
@jit(nopython=True)
def traj_remove_cog_translation(traj_data):
    """Remove the center-of-geometry translation from every frame.

    Modifies ``traj_data`` in place and also returns it.
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    nDim = traj_data.shape[2]
    # start be removing COG translation
    for ts in range(n_frames):
        mu = np.zeros(nDim)
        for atom in range(n_atoms):
            mu += traj_data[ts,atom]
        mu /= n_atoms
        traj_data[ts] -= mu
    return traj_data
@jit(nopython=True)
def particle_variances_from_trajectory(traj_data, avg):
    """Per-atom positional variances of ``traj_data`` about ``avg``.

    Returns a float64 array of length n_atoms, averaged over the three
    Cartesian dimensions and n_frames-1 frames.
    """
    # meta data
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    # displacements of every frame from the average structure
    disp = traj_data - avg
    particleVariances = np.zeros(n_atoms,dtype=np.float64)
    for ts in range(n_frames):
        for atom in range(n_atoms):
            particleVariances[atom] += np.dot(disp[ts,atom],disp[ts,atom])
    particleVariances /= 3*(n_frames-1)
    return particleVariances
@jit(nopython=True)
def intermediate_kabsch_weights(variances):
    """Build a Kabsch weight (precision) matrix from per-atom variances.

    The matrix is diag(1/var) minus a rank-one correction so that the
    constant (all-ones) vector lies in its null space.
    """
    # meta data
    n_atoms = variances.shape[0]
    # kasbch weights are inverse of variances
    inverseVariances = np.power(variances,-1)
    kabsch_weights = np.zeros((n_atoms,n_atoms),dtype=np.float64)
    # force constant vector to be null space of kabsch weights
    wsum = np.sum(inverseVariances)
    for i in range(n_atoms):
        # Populate diagonal elements
        kabsch_weights[i,i] = inverseVariances[i]
        for j in range(n_atoms):
            kabsch_weights[i,j] -= inverseVariances[i]*inverseVariances[j]/wsum
    # return the weights
    return kabsch_weights
# compute the average structure and covariance from trajectory data
@jit(nopython=True)
def traj_iterative_average_vars_intermediate_kabsch(traj_data,thresh=1E-3,max_steps=300):
    """Iteratively compute a variance-weighted average structure.

    Alternates between (a) aligning every frame to the current average with
    variance-based Kabsch weights and (b) recomputing the average and the
    per-particle variances, until the log likelihood converges or
    ``max_steps`` is reached.

    Returns (aligned_pos, avg, particleVariances).
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    nDim = traj_data.shape[2]
    # Initialize with uniform weighted Kabsch
    avg, aligned_pos = traj_iterative_average(traj_data,thresh)
    # Compute Kabsch Weights
    particleVariances = particle_variances_from_trajectory(aligned_pos, avg)
    kabsch_weights = intermediate_kabsch_weights(particleVariances)
    log_lik = intermediate_kabsch_log_lik(aligned_pos,avg,kabsch_weights)
    # perform iterative alignment and average to converge average
    log_lik_diff = 10
    step = 0
    while log_lik_diff > thresh and step < max_steps:
        # rezero new average
        new_avg = np.zeros((n_atoms,nDim),dtype=np.float64)
        # align trajectory to average and accumulate new average
        for ts in range(n_frames):
            aligned_pos[ts] = weight_kabsch_rotate(aligned_pos[ts], avg, kabsch_weights)
            new_avg += aligned_pos[ts]
        # finish average
        new_avg /= n_frames
        # compute log likelihood
        new_log_lik = intermediate_kabsch_log_lik(aligned_pos,avg,kabsch_weights)
        log_lik_diff = np.abs(new_log_lik-log_lik)
        log_lik = new_log_lik
        # compute new Kabsch Weights
        particleVariances = particle_variances_from_trajectory(aligned_pos,new_avg)
        # BUG FIX: this previously assigned to a misspelled variable
        # ("kabschWeightes"), so the weights were never actually updated
        # between iterations.
        kabsch_weights = intermediate_kabsch_weights(particleVariances)
        # compute Distance between averages
        avgRmsd = weight_kabsch_dist_align(avg,new_avg,kabsch_weights)
        avg = np.copy(new_avg)
        step += 1
        print(step, avgRmsd,log_lik)
    return aligned_pos, avg, particleVariances
# compute the average structure and covariance from trajectory data
@jit(nopython=True)
def traj_iterative_average_precision_weighted_kabsch(traj_data,thresh=1E-3,max_steps=300):
    """Iteratively converge an average structure using a full atom-atom
    precision matrix in the Kabsch alignment.

    Returns (aligned_pos, avg, precision, lpdet) where ``precision`` is the
    pseudo-inverse of the (n_atoms, n_atoms) covariance and ``lpdet`` its
    log pseudo-determinant.
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    nDim = traj_data.shape[2]
    # Initialize with uniform weighted Kabsch
    avg, aligned_pos = traj_iterative_average(traj_data,thresh)
    # Compute Kabsch Weights
    disp = aligned_pos - avg
    covar = np.zeros((n_atoms,n_atoms),dtype=np.float64)
    for ts in range(n_frames):
        covar += np.dot(disp[ts],disp[ts].T)
    covar /= nDim*(n_frames-1)
    # determine precision and pseudo determinant
    lpdet, precision, rank = pseudo_lpdet_inv(covar)
    # compute log likelihood
    log_lik = weight_kabsch_log_lik(aligned_pos, avg, precision, lpdet)
    # perform iterative alignment and average to converge average
    log_lik_diff = 10+thresh
    step = 0
    while log_lik_diff > thresh and step < max_steps:
        # rezero new average
        new_avg = np.zeros((n_atoms,nDim),dtype=np.float64)
        # align trajectory to average and accumulate new average
        for ts in range(n_frames):
            aligned_pos[ts] = weight_kabsch_rotate(aligned_pos[ts], avg, precision)
            new_avg += aligned_pos[ts]
        # finish average
        new_avg /= n_frames
        # compute new Kabsch Weights
        covar = np.zeros((n_atoms,n_atoms),dtype=np.float64)
        for ts in range(n_frames):
            disp = aligned_pos[ts] - new_avg
            covar += np.dot(disp,disp.T)
        covar /= nDim*(n_frames-1)
        # determine precision and pseudo determinant
        lpdet, precision, rank = pseudo_lpdet_inv(covar)
        # compute log likelihood
        new_log_lik = weight_kabsch_log_lik(aligned_pos, new_avg, precision, lpdet)
        log_lik_diff = np.abs(new_log_lik-log_lik)
        log_lik = new_log_lik
        avg = np.copy(new_avg)
        step += 1
    #    print(step, log_lik)
    return aligned_pos, avg, precision, lpdet
# compute the average structure and covariance from trajectory data
@jit(nopython=True)
def traj_iterative_average_weighted_kabsch(traj_data,thresh=1E-3,max_steps=200):
    """Converge the average structure via precision-weighted Kabsch
    alignment; like ``traj_iterative_average_precision_weighted_kabsch`` but
    returning only (avg, aligned_pos).
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    nDim = traj_data.shape[2]
    # Initialize with uniform weighted Kabsch
    avg, aligned_pos = traj_iterative_average(traj_data,thresh)
    # Compute Kabsch Weights
    disp = aligned_pos - avg
    covar = np.zeros((n_atoms,n_atoms),dtype=np.float64)
    for ts in range(n_frames):
        covar += np.dot(disp[ts],disp[ts].T)
    covar /= nDim*(n_frames-1)
    # determine precision and pseudo determinant
    lpdet, precision, rank = pseudo_lpdet_inv(covar)
    # perform iterative alignment and average to converge average
    log_lik = weight_kabsch_log_lik(aligned_pos, avg, precision, lpdet)
    log_lik_diff = 10
    step = 0
    while log_lik_diff > thresh and step < max_steps:
        # rezero new average
        new_avg = np.zeros((n_atoms,nDim),dtype=np.float64)
        # align trajectory to average and accumulate new average
        for ts in range(n_frames):
            aligned_pos[ts] = weight_kabsch_rotate(aligned_pos[ts], avg, precision)
            new_avg += aligned_pos[ts]
        # finish average
        new_avg /= n_frames
        # compute new Kabsch Weights
        covar = np.zeros((n_atoms,n_atoms),dtype=np.float64)
        disp = aligned_pos - new_avg
        for ts in range(n_frames):
            covar += np.dot(disp[ts],disp[ts].T)
        covar /= nDim*(n_frames-1)
        # determine precision and pseudo determinant
        lpdet, precision, rank = pseudo_lpdet_inv(covar)
        # compute new log likelihood
        new_log_lik = weight_kabsch_log_lik(aligned_pos, new_avg, precision, lpdet)
        log_lik_diff = np.abs(new_log_lik-log_lik)
        log_lik = new_log_lik
        avg = np.copy(new_avg)
        step += 1
    # return average structure and aligned trajectory
    return avg, aligned_pos
# compute the average structure from trajectory data
@jit(nopython=True)
def traj_iterative_average(traj_data,thresh=1E-3):
    """Iteratively align and average a trajectory with unweighted Kabsch.

    COG translation is removed first; the average is seeded with frame 0
    and refined until the uniform log likelihood converges.
    Returns (avg, aligned_pos).
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    nDim = traj_data.shape[2]
    # create numpy array of aligned positions
    aligned_pos = np.copy(traj_data).astype(np.float64)
    # start be removing COG translation
    for ts in range(n_frames):
        mu = np.zeros(nDim)
        for atom in range(n_atoms):
            mu += aligned_pos[ts,atom]
        mu /= n_atoms
        aligned_pos[ts] -= mu
    # Initialize average as first frame
    avg = np.copy(aligned_pos[0]).astype(np.float64)
    log_lik = uniform_kabsch_log_lik(aligned_pos,avg)
    # perform iterative alignment and average to converge log likelihood
    log_lik_diff = 10
    count = 1
    while log_lik_diff > thresh:
        # rezero new average
        new_avg = np.zeros((n_atoms,nDim),dtype=np.float64)
        # align trajectory to average and accumulate new average
        for ts in range(n_frames):
            aligned_pos[ts] = kabsch_rotate(aligned_pos[ts], avg)
            new_avg += aligned_pos[ts]
        # finish average
        new_avg /= n_frames
        # compute log likelihood
        new_log_lik = uniform_kabsch_log_lik(aligned_pos,avg)
        log_lik_diff = np.abs(new_log_lik-log_lik)
        log_lik = new_log_lik
        # copy new average
        avg = np.copy(new_avg)
        count += 1
    return avg, aligned_pos
# compute the average structure from trajectory data
@jit(nopython=True)
def traj_iterative_average_covar(traj_data,thresh=1E-3):
    """Converge the average structure (unweighted Kabsch), then compute the
    (3*n_atoms, 3*n_atoms) covariance of the flattened coordinates.

    NOTE(review): unlike ``traj_iterative_average`` this does not remove the
    COG translation first -- confirm the input is already centered.
    Returns (avg, covar).
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    nDim = traj_data.shape[2]
    # create numpy array of aligned positions
    aligned_pos = np.copy(traj_data)
    # Initialize average as first frame
    avg = np.copy(aligned_pos[0]).astype(np.float64)
    log_lik = uniform_kabsch_log_lik(aligned_pos,avg)
    # perform iterative alignment and average to converge average
    log_lik_diff = 10
    while log_lik_diff > thresh:
        # rezero new average
        new_avg = np.zeros((n_atoms,nDim),dtype=np.float64)
        # align trajectory to average and accumulate new average
        for ts in range(n_frames):
            # align positions
            aligned_pos[ts] = kabsch_rotate(aligned_pos[ts], avg)
            new_avg += aligned_pos[ts]
        # finish average
        new_avg /= n_frames
        # compute log likelihood
        new_log_lik = uniform_kabsch_log_lik(aligned_pos,avg)
        log_lik_diff = np.abs(new_log_lik-log_lik)
        log_lik = new_log_lik
        avg = np.copy(new_avg)
    covar = np.zeros((n_atoms*nDim,n_atoms*nDim),dtype=np.float64)
    # loop over trajectory and compute average and covariance
    for ts in range(n_frames):
        disp = (aligned_pos[ts]-avg).flatten()
        covar += np.outer(disp,disp)
    # finish average (sample covariance, n_frames-1 normalization)
    covar /= (n_frames-1)
    return avg, covar
# compute the average structure from weighted trajectory data
@jit(nopython=True)
def traj_iterative_average_weighted(traj_data, weights, prev_avg=None, thresh=1E-3):
    """Iteratively compute the frame-weighted average structure.

    Parameters
    ----------
    traj_data : float64 array (n_frames, n_atoms, 3)
    weights : float64 array (n_frames,); normalized IN PLACE to sum to 1
    prev_avg : optional starting average; defaults to the highest-weight frame
    thresh : log-likelihood convergence threshold

    Returns
    -------
    (aligned_pos, avg) : aligned trajectory and weighted average structure
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    nDim = traj_data.shape[2]
    nFeatures = n_atoms*nDim
    # determine normalization
    norm = np.power(np.sum(weights),-1)
    weights *= norm
    # create numpy array of aligned positions
    aligned_pos = np.copy(traj_data)
    # Initialize average
    # BUG FIX: use an identity check -- ``prev_avg == None`` performs an
    # elementwise comparison when prev_avg is an array.
    if prev_avg is None:
        avg = np.copy(traj_data[np.argmax(weights)])
    else:
        avg = np.copy(prev_avg)
    log_lik = uniform_kabsch_log_lik(aligned_pos,avg)
    # perform iterative alignment and average to converge average
    log_lik_diff = 10
    while log_lik_diff > thresh:
        # rezero new average
        new_avg = np.zeros((n_atoms,nDim),dtype=np.float64)
        # align trajectory to average and accumulate new average
        for ts in range(n_frames):
            # align to average
            aligned_pos[ts] = kabsch_rotate(aligned_pos[ts], avg)
            new_avg += weights[ts]*aligned_pos[ts]
        # compute log likelihood
        new_log_lik = uniform_kabsch_log_lik(aligned_pos,avg)
        log_lik_diff = np.abs(new_log_lik-log_lik)
        log_lik = new_log_lik
        # copy new avg
        avg = np.copy(new_avg)
    return aligned_pos, avg
# compute the average structure and covariance from weighted trajectory data
@jit(nopython=True)
def traj_iterative_average_precision_weighted_weighted_kabsch(traj_data, weights, prev_avg, prev_precision, prev_lpdet, thresh=1E-3, max_steps=100):
    """Frame-weighted iterative average with atom-precision-weighted Kabsch.

    Starts from a previous average/precision, then alternates alignment,
    weighted averaging and precision re-estimation until the log likelihood
    converges (or ``max_steps`` is reached).  ``weights`` is normalized in
    place to sum to 1.

    Returns (avg, precision, lpdet).
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    n_atoms = traj_data.shape[1]
    nDim = traj_data.shape[2]
    nFeatures = n_atoms*nDim
    # determine normalization
    norm = np.power(np.sum(weights),-1)
    weights *= norm
    # create numpy array of aligned positions
    aligned_pos = np.copy(traj_data)
    # Initialize average with previous average
    avg = np.copy(prev_avg)
    # BUG FIX: seed the loop with the previous precision/lpdet; ``precision``
    # was previously read before assignment in the first loop iteration.
    precision = prev_precision
    lpdet = prev_lpdet
    # compute log likelihood of current trajectory alignment
    log_lik = weight_kabsch_log_lik(aligned_pos, avg, prev_precision, prev_lpdet)
    # perform iterative alignment and average to converge average
    log_lik_diff = 10 + thresh
    step = 0
    while log_lik_diff > thresh and step < max_steps:
        # rezero new average
        new_avg = np.zeros((n_atoms,nDim),dtype=np.float64)
        # align trajectory to average and accumulate new average
        for ts in range(n_frames):
            # align to average
            aligned_pos[ts] = weight_kabsch_rotate(aligned_pos[ts], avg, precision)
            new_avg += weights[ts]*aligned_pos[ts]
        # compute new Kabsch Weights
        covar = np.zeros((n_atoms,n_atoms),dtype=np.float64)
        for ts in range(n_frames):
            disp = aligned_pos[ts] - new_avg
            covar += weights[ts]*np.dot(disp,disp.T)
        covar /= 3.0
        # determine precision and pseudo determinant
        lpdet, precision, rank = pseudo_lpdet_inv(covar)
        # compute log likelihood
        new_log_lik = weight_kabsch_log_lik(aligned_pos, new_avg, precision, lpdet)
        log_lik_diff = np.abs(new_log_lik-log_lik)
        log_lik = new_log_lik
        # copy new avg
        avg = np.copy(new_avg)
        step += 1
    return avg, precision, lpdet
# align trajectory data to a reference structure
@jit(nopython=True)
def traj_align_weighted_kabsch(traj_data, ref, precision):
    """Align every frame of ``traj_data`` to ``ref`` with precision-weighted
    Kabsch.  Returns a new aligned copy; the input is not modified.
    """
    # trajectory metadata
    n_frames = traj_data.shape[0]
    # create numpy array of aligned positions
    aligned_pos = np.copy(traj_data)
    for ts in range(n_frames):
        # align positions based on weighted Kabsch
        aligned_pos[ts] = weight_kabsch_rotate(aligned_pos[ts], ref, precision)
    return aligned_pos
# align trajectory data to a reference structure
@jit(nopython=True)
def traj_align(traj_data,ref):
    # Align every frame of an (already centered) trajectory to a reference
    # structure via the standard Kabsch rotation; returns a rotated copy.
    aligned = np.copy(traj_data)
    for frame in range(aligned.shape[0]):
        aligned[frame] = kabsch_rotate(aligned[frame], ref)
    return aligned
# compute the covariance from trajectory data
# we assume the trajectory is aligned here
@jit(nopython=True)
def traj_covar(traj_data):
    # Covariance of flattened frame coordinates, covar = E[x x^T] - mu mu^T,
    # assuming the trajectory has already been aligned.
    n_frames = traj_data.shape[0]
    n_features = traj_data.shape[1] * traj_data.shape[2]
    # accumulate first and second moments over frames
    mean_vec = np.zeros((n_features))
    second_moment = np.zeros((n_features, n_features))
    for frame in range(n_frames):
        conf = traj_data[frame].flatten()
        mean_vec += conf
        second_moment += np.outer(conf, conf)
    mean_vec /= n_frames
    second_moment /= n_frames
    # subtract the outer product of the mean to obtain the covariance
    return second_moment - np.outer(mean_vec, mean_vec)
# compute the time separated covariance matrix
@jit(nopython=True)
def traj_time_covar(traj1, traj2, mean1, mean2, lag):
# trajectory metadata
n_frames = traj1.shape[0]
n_atoms = traj1.shape[1]
nDim = traj1.shape[2]
# declare covar
covar = np.zeros((n_atoms*nDim,n_atoms*nDim),dtype=np.float64)
# loop over trajectory and compute average and covariance
for ts in range(n_frames-lag):
disp1 = traj1[ts].flatten()-mean1.flatten()
disp2 = traj2[ts+lag].flatten()-mean2.flatten()
covar += | np.outer(disp1,disp2) | numpy.outer |
import cv2
import os
import pdb
import numpy as np
from numpy.lib.function_base import piecewise
from tqdm import tqdm
import matplotlib.pyplot as plt
from PIL import Image
import copy
# read an image
def read_img(filename,denoising=False):
    """Load an image file as float32 RGB scaled to [0, 1].

    When ``denoising`` is True the image is first cleaned with OpenCV's
    non-local-means color denoiser.  Returns (image, height, width).
    """
    bgr = cv2.imread(filename)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    if denoising:
        rgb = cv2.fastNlMeansDenoisingColored(rgb,None,10,10,7,21)
    # scale 0~255 to 0~1
    scaled = np.array(rgb, dtype=np.float32) / 255.
    height, width, _ = scaled.shape
    return scaled, height, width
def region_seg(scan_folder, ref_view,prt_img=False):
use_new_depth=False
import cv2
try:
ref_img_orig, _, _ = read_img(os.path.join(scan_folder,'edge_detect/{:0>8}.jpg'.format(ref_view)),denoising=False)
except:
print('region_seg error')
return False
ref_img = cv2.cvtColor(ref_img_orig, cv2.COLOR_RGB2BGR) #! plt也是BGR
# ref_lines_file_path=os.path.join(scan_folder, 'images/save_lines/{:0>8}_lines.txt'.format(ref_view))
# ref_lines = np.loadtxt(ref_lines_file_path,delimiter=',')
# ref_img_orig=plot_lines(ref_img_orig,ref_lines,change_color=False)
ref_img=copy.deepcopy(ref_img_orig)
ref_img_gray = cv2.cvtColor(ref_img,cv2.COLOR_RGB2GRAY)
save_dir=os.path.join(scan_folder, "edge_detect/region_seg")
mask_save_dir=os.path.join(scan_folder, "edge_detect/region_seg_mask")
os.makedirs(save_dir, exist_ok=True)
os.makedirs(mask_save_dir, exist_ok=True)
mask_filename=os.path.join(mask_save_dir,"{:0>8}_region_seg_mask.npy".format(ref_view))
height,width,_=ref_img.shape
patchsize=10
mask=np.zeros((height,width),dtype=np.float32)
if os.path.exists(mask_filename) and 0:
mask=np.load(mask_filename)
# mask=np.cv2.imread(mask_filename)
else:
# start_time=time.time()
seg_values={}
for row in tqdm(range(patchsize,height-patchsize)):
#! 基于相邻灰度差异的方式
near_gray_gap=-np.ones((width,)).astype(np.float32) #*初值全为-1
for col in range(patchsize,width-patchsize):
patch_src_i=ref_img_gray[row,col-patchsize:col+patchsize]
near_gray_gap[col]=abs(patch_src_i.max()-patch_src_i.min())
near_gray_gap[near_gray_gap<0]=near_gray_gap.max()
simi_pixel=np.where(near_gray_gap<0.04)[0] #! 0.05
pixel_gap= | np.ones_like(simi_pixel) | numpy.ones_like |
import itertools
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matrices import start_prob, expert_emission_matrix, hmt_start_prob, hmt_expert_emission_matrix
def viterbi(observed_seq, transition_matrices, n_states=5):
    """
    Calculate the most probable sequence of hidden states using time-dependent
    transition matrices (log-space Viterbi).

    Parameters
    ----------
    observed_seq : sequence of int
        Observation symbols; used to index columns of the emission matrix.
    transition_matrices : sequence of ndarray
        transition_matrices[j - 1][i, k] is the probability of moving from
        state i at step j - 1 to state k at step j.
    n_states : int
        Number of hidden states.

    Returns
    -------
    Z : ndarray of int
        Most probable hidden-state sequence.
    T1 : ndarray, shape (n_states, N)
        Log-probability of the best path ending in each state at each step.
    T2 : ndarray, shape (n_states, N)
        Backpointer table (-1 where unset).
    """
    startprob = start_prob()
    emissionmat = expert_emission_matrix()
    N = len(observed_seq)
    Z = np.zeros(N)
    T1 = np.zeros([n_states, N])
    T2 = np.full((n_states, N), -1)
    T1[:, 0] = np.log(startprob * emissionmat[:, observed_seq[0]])
    for j in range(1, N):
        for i in range(n_states):
            probabilities = (T1[:, j - 1] + np.log(transition_matrices[j - 1][:, i])
                             + np.log(emissionmat[i, observed_seq[j]]))
            T1[i, j] = np.max(probabilities)
            T2[i, j] = np.argmax(probabilities)
    Z[-1] = np.argmax(T1[:, N - 1])
    # Backtrack all the way to position 0.  The original loop stopped at
    # index 1 (range(N - 2, 0, -1)), leaving Z[0] at its zero-initialized
    # value instead of following the backpointer.
    for i in range(N - 2, -1, -1):
        Z[i] = T2[int(Z[i + 1]), i + 1]
    Z = Z.astype(int)
    return Z, T1, T2
def hmt_viterbi(X_obs, tree_obs, transition_matrices, dev_abb, dev_map, n_states=10):
"""
Calculate the most probable hidden tree using time-dependent transition matrices.
The tree structure is based on the results from STREAM.
"""
startprob = hmt_start_prob()
emission = hmt_expert_emission_matrix()
tm = transition_matrices
M = np.zeros((len(X_obs.index), n_states))
opt_states = np.full((len(X_obs.index), n_states), -1)
# Initialization
for leaf in tree_obs['leaves']:
M[int(leaf)] = np.log(emission[:, dev_map[X_obs.loc[leaf]['label']]])
branches = ['branch S6-S5', 'branch S4-S3', 'branch S2-S1', 'branch S0-S1']
for b in branches:
branch = tree_obs[b]
for i in range(1, len(branch)):
m_max = []
for j in range(n_states):
prob = M[int(branch[i - 1])] + np.log(tm[int(branch[i - 1])][j])
m_max.append(np.max(prob))
opt_states[int(branch[i - 1]), j] = np.argmax(prob)
m_max = np.array(m_max)
M[int(branch[i])] = m_max + np.log(emission[:, dev_map[X_obs.loc[branch[i]]['label']]])
# Branching point S1 '963'
branching_point = '963'
child = tree_obs['children'][branching_point]
m_max1 = []
for j in range(n_states):
prob = M[int(child[0])] + np.log(tm[int(child[0])][j])
m_max1.append(np.max(prob))
opt_states[int(child[0]), j] = np.argmax(prob)
m_max1 = np.array(m_max1)
m_max2 = []
for j in range(n_states):
prob = M[int(child[1])] + np.log(tm[int(child[1])][j])
m_max2.append(np.max(prob))
opt_states[int(child[1]), j] = np.argmax(prob)
m_max2 = np.array(m_max2)
M[int(branching_point)] = m_max1 + m_max2 + np.log(emission[:, dev_map[X_obs.loc[branching_point]['label']]])
# Branch S1-S3
branch = tree_obs['branch S1-S3']
for i in range(1, len(branch)):
m_max = []
for j in range(n_states):
prob = M[int(branch[i - 1])] + np.log(tm[int(branch[i - 1])][j])
m_max.append(np.max(prob))
opt_states[int(branch[i - 1]), j] = np.argmax(prob)
m_max = np.array(m_max)
M[int(branch[i])] = m_max + np.log(emission[:, dev_map[X_obs.loc[branch[i]]['label']]])
# Branching point S3 '509'
branching_point = '509'
child = tree_obs['children'][branching_point]
m_max1 = []
for j in range(n_states):
prob = M[int(child[0])] + np.log(tm[int(child[0])][j])
m_max1.append(np.max(prob))
opt_states[int(child[0]), j] = | np.argmax(prob) | numpy.argmax |
""" Runs the alignment test generated by elicitation.py on a set of test rewards and reports
performance. """
import logging
import pickle as pkl
from functools import partial
from itertools import product
from pathlib import Path
from typing import (
Dict,
Generator,
List,
Literal,
NamedTuple,
Optional,
Sequence,
Set,
Tuple,
Union,
cast,
)
import argh # type: ignore
import numpy as np
import tensorflow as tf # type: ignore
from driver.gym_env.legacy_env import LegacyEnv
from gym.spaces import flatten # type: ignore
from search import GeometricSearch, TestRewardSearch
tf.config.set_visible_devices([], "GPU") # Car simulation stuff is faster on cpu
from argh import arg
from driver.legacy.models import Driver
from gym.core import Env # type: ignore
from joblib import Parallel, delayed # type: ignore
from sklearn.metrics import confusion_matrix # type: ignore
from active.simulation_utils import TrajOptimizer, assert_normals, make_normals, orient_normals
from equiv_utils import add_equiv_constraints, remove_equiv
from random_baseline import make_random_questions
from testing_factory import TestFactory
from utils import (
assert_nonempty,
assert_reward,
assert_rewards,
get_mean_reward,
load,
make_gaussian_rewards,
parse_replications,
rollout,
setup_logging,
shape_compat,
)
Experiment = Tuple[float, Optional[float], int]
input_features_name = Path("input_features.npy")
normals_name = Path("normals.npy")
preferences_name = Path("preferences.npy")
true_reward_name = Path("true_reward.npy")
flags_name = Path("flags.pkl")
use_equiv = False
# Top level functions callable from fire
@arg("--epsilons", nargs="+", type=float)
def premake_test_rewards(
epsilons: List[float] = [0.0],
n_rewards: int = 100,
n_test_states: Optional[int] = None,
n_gt_test_questions: int = 10000,
true_reward_name: Path = Path("true_reward.npy"),
datadir: Path = Path(),
outdir: Path = Path(),
replications: Optional[Union[str, Tuple[int, ...]]] = None,
n_cpus: int = 1,
overwrite: bool = False,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
):
""" Finds test rewards for each experiment. """
outdir.mkdir(parents=True, exist_ok=True)
# TODO(joschnei): I'm making some dangerous logging decisions. Do I want to append to logs, or
# give logs unique names? I really need to pick at least one.
setup_logging(verbosity, log_path=outdir / "log.txt")
if replications is not None:
replication_indices = parse_replications(replications)
for replication in replication_indices:
if not (datadir / str(replication)).exists():
logging.warning(f"Replication {replication} does not exist, skipping")
continue
premake_test_rewards(
epsilons=epsilons,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=n_gt_test_questions,
true_reward_name=true_reward_name,
datadir=datadir / str(replication),
outdir=outdir / str(replication),
use_equiv=use_equiv,
n_cpus=n_cpus,
overwrite=overwrite,
verbosity=verbosity,
)
logging.info(f"Done with replication {replication}")
exit()
true_reward = np.load(datadir / true_reward_name)
assert_reward(true_reward, False, 4)
with Parallel(n_jobs=n_cpus) as parallel:
make_test_rewards(
epsilons=epsilons,
true_reward=true_reward,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=int(n_gt_test_questions),
outdir=outdir,
parallel=parallel,
use_equiv=use_equiv,
overwrite=overwrite,
)
@arg("--epsilons", nargs="+", type=float)
@arg("--deltas", nargs="+", type=float)
@arg("--human-samples", nargs="+", type=int)
def simulated(
epsilons: List[float] = [0.0],
n_rewards: int = 100,
human_samples: List[int] = [1],
n_reward_samples: int = 1000,
n_test_states: Optional[int] = None,
n_gt_test_questions: int = 10000,
traj_opt: bool = False,
datadir: Path = Path(),
outdir: Path = Path(),
deltas: List[Optional[float]] = [None],
use_mean_reward: bool = False,
use_random_test_questions: bool = False,
n_random_test_questions: Optional[int] = None,
use_cheating_questions: bool = False,
skip_remove_duplicates: bool = False,
skip_epsilon_filtering: bool = False,
skip_redundancy_filtering: bool = False,
use_true_epsilon: bool = False,
legacy_test_rewards: bool = False,
replications: Optional[Union[str, Tuple[int, ...]]] = None,
n_cpus: int = 1,
overwrite_test_rewards: bool = False,
overwrite_results: bool = False,
verbosity: Literal["INFO", "DEBUG"] = "INFO",
) -> None:
""" Evaluates alignment test generated by ground-truth rewards. """
logging.basicConfig(level=verbosity, format="%(levelname)s:%(asctime)s:%(message)s")
if replications is not None:
replication_indices = parse_replications(replications)
for replication in replication_indices:
if not (datadir / str(replication)).exists():
logging.warning(f"Replication {replication} does not exist, skipping")
continue
logging.info(f"Starting replication {replication}")
simulated(
epsilons=epsilons,
deltas=deltas,
n_rewards=n_rewards,
human_samples=human_samples,
n_reward_samples=n_reward_samples,
n_test_states=n_test_states,
n_gt_test_questions=n_gt_test_questions,
datadir=datadir / str(replication),
outdir=outdir / str(replication),
use_mean_reward=use_mean_reward,
use_random_test_questions=use_random_test_questions,
use_cheating_questions=use_cheating_questions,
n_random_test_questions=n_random_test_questions,
skip_remove_duplicates=skip_remove_duplicates,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
use_true_epsilon=use_true_epsilon,
legacy_test_rewards=legacy_test_rewards,
n_cpus=n_cpus,
overwrite_test_rewards=overwrite_test_rewards,
overwrite_results=overwrite_results,
verbosity=verbosity,
)
exit()
logging.info(f"Using {n_cpus} cpus.")
parallel = Parallel(n_jobs=n_cpus)
outdir.mkdir(parents=True, exist_ok=True)
if n_random_test_questions is not None:
# Argh defaults to parsing something as a string if its optional
n_random_test_questions = int(n_random_test_questions)
flags = pkl.load(open(datadir / flags_name, "rb"))
query_type = flags["query_type"]
equiv_probability = flags["equiv_size"]
env = Driver()
n_reward_features = env.num_of_features
logging.info("Loading elicitation results")
elicited_normals, elicited_preferences, elicited_input_features = load_elicitation(
datadir=datadir,
normals_name=normals_name,
preferences_name=preferences_name,
input_features_name=input_features_name,
n_reward_features=n_reward_features,
use_equiv=use_equiv,
query_type=query_type,
equiv_probability=equiv_probability,
)
true_reward = np.load(datadir / true_reward_name)
assert_reward(true_reward, False, n_reward_features)
if use_equiv:
true_reward = np.append(true_reward, [1])
else:
assert not np.any(elicited_preferences == 0)
factory = TestFactory(
query_type=query_type,
reward_dimension=elicited_normals.shape[1],
equiv_probability=equiv_probability,
n_reward_samples=n_reward_samples,
use_mean_reward=use_mean_reward,
skip_dedup=skip_remove_duplicates,
skip_noise_filtering=True,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
use_true_epsilon=use_true_epsilon,
true_reward=true_reward,
)
logging.info(
f"""Filtering settings:
# reward samples={n_reward_samples},
use mean reward={use_mean_reward},
skip duplicates={skip_remove_duplicates}
skip noise={True}
skip epsilon={skip_epsilon_filtering}
skip redundancy={skip_redundancy_filtering}
use true epsilon={use_true_epsilon}
"""
)
confusion_path, test_path = make_outnames(
outdir,
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
)
confusions: Dict[Experiment, np.ndarray] = load(confusion_path, overwrite_results, default={})
minimal_tests: Dict[Experiment, np.ndarray] = load(test_path, overwrite_results, default={})
experiments = make_experiments(
epsilons, deltas, human_samples, overwrite_results, experiments=set(minimal_tests.keys())
)
if use_random_test_questions:
logging.info("Making random test")
logging.info(f"True reward: {true_reward}")
normals, preferences, input_features = make_random_test(
n_random_test_questions,
elicited_input_features,
elicited_preferences,
reward_iterations=flags["reward_iterations"],
query_type=query_type,
equiv_size=flags["equiv_size"],
sim=env,
use_equiv=use_equiv,
)
good_indices = (true_reward @ normals.T) > 0
logging.info(f"{np.mean(good_indices)*100:2f}% of new test questions agree with gt reward.")
if use_cheating_questions:
logging.info(f"Selecting only questions consistent with gt reward")
normals = normals[good_indices]
preferences = preferences[good_indices]
input_features = input_features[good_indices]
assert_normals(normals, use_equiv)
else:
max_n = max(human_samples)
preferences = elicited_preferences[:max_n]
input_features = elicited_input_features[:max_n]
logging.debug(f"elicited_normals={elicited_normals[:10]}")
normals = orient_normals(
elicited_normals[:max_n], preferences, use_equiv, n_reward_features
)
logging.debug(f"normals={normals[:10]}")
assert np.all(true_reward @ normals.T >= 0)
if not legacy_test_rewards:
test_rewards = make_test_rewards(
epsilons=epsilons,
true_reward=true_reward,
n_rewards=n_rewards,
n_test_states=n_test_states,
n_gt_test_questions=int(n_gt_test_questions),
traj_opt=traj_opt,
outdir=outdir,
parallel=parallel,
use_equiv=use_equiv,
overwrite=overwrite_test_rewards,
)
else:
test_rewards = legacy_make_test_rewards(1000, n_rewards, true_reward, epsilons, use_equiv)
for indices, confusion, experiment in parallel(
delayed(run_gt_experiment)(
normals=normals,
test_rewards=test_rewards[epsilon][0],
test_reward_alignment=test_rewards[epsilon][1],
epsilon=epsilon,
delta=delta,
use_equiv=use_equiv,
n_human_samples=n,
factory=factory,
input_features=input_features,
preferences=preferences,
outdir=outdir,
verbosity=verbosity,
)
for epsilon, delta, n in experiments
):
minimal_tests[experiment] = indices
confusions[experiment] = confusion
pkl.dump(confusions, open(confusion_path, "wb"))
pkl.dump(minimal_tests, open(test_path, "wb"))
@arg("--epsilons", nargs="+", type=float)
@arg("--deltas", nargs="+", type=float)
@arg("--human-samples", nargs="+", type=int)
def human(
epsilons: List[float] = [0.0],
deltas: List[float] = [0.05],
n_rewards: int = 10000,
human_samples: List[int] = [1],
n_model_samples: int = 1000,
input_features_name: Path = Path("input_features.npy"),
normals_name: Path = Path("normals.npy"),
preferences_name: Path = Path("preferences.npy"),
flags_name: Path = Path("flags.pkl"),
datadir: Path = Path("questions"),
outdir: Path = Path("questions"),
rewards_path: Optional[Path] = None,
use_mean_reward: bool = False,
skip_remove_duplicates: bool = False,
skip_epsilon_filtering: bool = False,
skip_redundancy_filtering: bool = False,
n_cpus: int = 1,
overwrite: bool = False,
):
""" Evaluates alignment test elicited from a human. """
outdir.mkdir(parents=True, exist_ok=True)
parallel = Parallel(n_jobs=n_cpus)
flags = pkl.load(open(datadir / flags_name, "rb"))
query_type = flags["query_type"]
equiv_probability = flags["equiv_size"]
sim = Driver()
n_reward_features = sim.num_of_features
elicited_normals, elicited_preferences, elicited_input_features = load_elicitation(
datadir=datadir,
normals_name=normals_name,
preferences_name=preferences_name,
input_features_name=input_features_name,
n_reward_features=n_reward_features,
use_equiv=use_equiv,
query_type=query_type,
equiv_probability=equiv_probability,
)
assert elicited_preferences.shape[0] > 0
factory = TestFactory(
query_type=query_type,
reward_dimension=elicited_normals.shape[1],
equiv_probability=equiv_probability,
n_reward_samples=n_model_samples,
use_mean_reward=use_mean_reward,
skip_dedup=skip_remove_duplicates,
skip_noise_filtering=True,
skip_epsilon_filtering=skip_epsilon_filtering,
skip_redundancy_filtering=skip_redundancy_filtering,
)
test_path = outdir / make_outname(
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
base="indices",
)
test_results_path = outdir / make_outname(
skip_remove_duplicates,
True,
skip_epsilon_filtering,
skip_redundancy_filtering,
base="test_results",
)
minimal_tests: Dict[Experiment, np.ndarray] = load(test_path, overwrite)
results: Dict[Experiment, np.ndarray] = load(test_results_path, overwrite)
test_rewards = (
np.load(open(rewards_path, "rb"))
if rewards_path is not None
else make_gaussian_rewards(n_rewards, use_equiv)
)
np.save(outdir / "test_rewards.npy", test_rewards)
experiments = make_experiments(
epsilons, deltas, human_samples, overwrite, experiments=set(minimal_tests.keys())
)
for indices, result, experiment in parallel(
delayed(run_human_experiment)(
test_rewards,
elicited_normals,
elicited_input_features,
elicited_preferences,
epsilon,
delta,
n,
factory,
use_equiv,
)
for epsilon, delta, n in experiments
):
minimal_tests[experiment] = indices
results[experiment] = result
pkl.dump(minimal_tests, open(test_path, "wb"))
pkl.dump(results, open(test_results_path, "wb"))
def compare_test_labels(
    test_rewards_path: Path,
    true_reward_path: Path,
    traj_opt: bool = False,
    elicitation: bool = False,
    replications: Optional[str] = None,
    normals_path: Optional[Path] = None,
):
    """ Compares critic (trajectory optimization) labels against elicitation
    labels for saved test-reward sets.

    Exactly one of `traj_opt` / `elicitation` must be set: it names the source
    of the labels already stored in `test_rewards_path`; the other labelling
    is recomputed here and the agreement rate is printed.
    """
    if replications is not None:
        raise NotImplementedError("Replications not yet implemented")
    starting_tests: Dict[float, Tuple[np.ndarray, np.ndarray]] = pkl.load(
        open(test_rewards_path, "rb")
    )
    assert not (traj_opt == elicitation), "Provided labels must come from exactly one source"

    class Test(NamedTuple):
        rewards: np.ndarray
        q_labels: np.ndarray
        elicitation_labels: np.ndarray

    test_rewards: Dict[float, Test] = {}
    true_reward = np.load(true_reward_path)
    if traj_opt:
        normals = np.load(normals_path)
        for epsilon, (rewards, q_labels) in starting_tests.items():
            # Filter from the full normals set on every iteration.  The
            # original reassigned `normals`, so each subsequent epsilon saw a
            # cumulatively shrunken question set.
            eps_normals = normals[true_reward @ normals.T > epsilon]
            elicitation_labels = run_test(eps_normals, rewards, use_equiv=False)
            test_rewards[epsilon] = Test(
                rewards=rewards, q_labels=q_labels, elicitation_labels=elicitation_labels
            )
    elif elicitation:
        # joblib.Parallel takes n_jobs; the original passed the invalid
        # keyword n_cpus, which raises TypeError.
        parallel = Parallel(n_jobs=-4)
        env = LegacyEnv(reward=true_reward, random_start=True)
        traj_optimizer = TrajOptimizer(10)
        for epsilon, (rewards, elicitation_labels) in starting_tests.items():
            q_labels = rewards_aligned(
                traj_optimizer=traj_optimizer,
                env=env,
                true_reward=true_reward,
                test_rewards=rewards,
                epsilon=epsilon,
                parallel=parallel,
            )
            test_rewards[epsilon] = Test(
                rewards=rewards, q_labels=q_labels, elicitation_labels=elicitation_labels
            )
    # Aggregate agreement between the two labelings over all epsilons.
    total_agree = 0
    total_rewards = 0
    for epsilon, test in test_rewards.items():
        total_agree += np.sum(test.q_labels == test.elicitation_labels)
        total_rewards += len(test.rewards)
    print(
        f"Critic and superset labels agree on {total_agree / total_rewards * 100 :.1f}% of rewards"
    )
# Test reward generation
def make_test_rewards(
    epsilons: Sequence[float],
    true_reward: np.ndarray,
    n_rewards: int,
    outdir: Path,
    parallel: Parallel,
    n_test_states: Optional[int] = None,
    traj_opt: bool = False,
    max_attempts: int = 10,
    n_gt_test_questions: Optional[int] = None,
    use_equiv: bool = False,
    overwrite: bool = False,
) -> Dict[float, Tuple[np.ndarray, np.ndarray]]:
    """ Makes test rewards sets for every epsilon and saves them to a file.

    Returns a dict mapping epsilon -> (test rewards, alignment labels).
    Previously generated sets are loaded from outdir/test_rewards.pkl and only
    the missing epsilons are computed.
    """
    # Only build a trajectory optimizer when critic labelling is requested.
    traj_optimizer = (
        TrajOptimizer(n_planner_iters=100, optim=tf.keras.optimizers.Adam(0.2))
        if traj_opt
        else None
    )
    reward_path = outdir / "test_rewards.pkl"
    test_rewards: Dict[float, Tuple[np.ndarray, np.ndarray]] = load(
        reward_path, overwrite=overwrite
    )
    if test_rewards is None:
        test_rewards = {}
    else:
        logging.info(f"Loading test rewards from {reward_path}")
    # Only the epsilons without a cached reward set need to be generated.
    new_epsilons = set(epsilons) - test_rewards.keys()
    if len(new_epsilons) > 0:
        logging.info(f"Creating new test rewards for epsilons: {new_epsilons}")
    # Parallelize at the innermost level (over test states) when there are
    # multiple test states or only one epsilon; otherwise parallelize over
    # epsilons here.
    if (n_test_states is not None and n_test_states > 1) or len(new_epsilons) == 1:
        # Parallelize internally
        test_rewards.update(
            {
                epsilon: find_reward_boundary(
                    true_reward=true_reward,
                    traj_optimizer=traj_optimizer,
                    n_rewards=n_rewards,
                    use_equiv=use_equiv,
                    epsilon=epsilon,
                    n_test_states=n_test_states,
                    max_attempts=max_attempts,
                    outdir=outdir,
                    n_gt_test_questions=n_gt_test_questions,
                    overwrite=overwrite,
                    parallel=parallel,
                )[:2]
                for epsilon in new_epsilons
            }
        )
    else:
        for rewards, alignment, epsilon in parallel(
            delayed(find_reward_boundary)(
                true_reward=true_reward,
                traj_optimizer=traj_optimizer,
                n_rewards=n_rewards,
                use_equiv=use_equiv,
                epsilon=epsilon,
                n_test_states=n_test_states,
                max_attempts=max_attempts,
                n_gt_test_questions=n_gt_test_questions,
                outdir=outdir,
                overwrite=overwrite,
                parallel=None,
            )
            for epsilon in new_epsilons
        ):
            test_rewards[epsilon] = (rewards, alignment)
    logging.info(f"Writing generated test rewards to {reward_path}")
    pkl.dump(test_rewards, open(reward_path, "wb"))
    return test_rewards
def find_reward_boundary(
    true_reward: np.ndarray,
    traj_optimizer: Optional[TrajOptimizer],
    n_rewards: int,
    use_equiv: bool,
    epsilon: float,
    max_attempts: int,
    outdir: Path,
    parallel: Parallel,
    n_test_states: Optional[int] = None,
    n_gt_test_questions: Optional[int] = None,
    overwrite: bool = False,
) -> Tuple[np.ndarray, np.ndarray, float]:
    """ Finds a ballanced set of test rewards according to a critic and epsilon.

    Searches over the covariance of a Gaussian reward proposal (via
    TestRewardSearch / GeometricSearch) until the sampled rewards are roughly
    balanced between epsilon-aligned and misaligned.  Returns
    (rewards, alignment labels, epsilon).
    """
    env = LegacyEnv(reward=true_reward)
    # Don't parallelize here if we're only testing at one state
    logging.debug(f"# test states={n_test_states}")
    parallel = None if n_test_states is None or n_test_states <= 1 else parallel
    # Proposal distribution: Gaussians centered on the true reward.
    new_rewards = partial(
        make_gaussian_rewards, n_rewards=n_rewards, use_equiv=use_equiv, mean=true_reward
    )
    # Labelling function for a candidate reward set.
    get_alignment = partial(
        rewards_aligned,
        traj_optimizer=traj_optimizer,
        env=env,
        true_reward=true_reward,
        epsilon=epsilon,
        parallel=parallel,
        n_test_states=n_test_states,
        n_questions=n_gt_test_questions,
    )
    # Resume a previous search for this epsilon when available; the callables
    # are not picklable, so they are re-attached after loading.
    search = TestRewardSearch.load(epsilon=epsilon, path=outdir / "search.pkl", overwrite=overwrite)
    if search is None:
        search = TestRewardSearch(
            epsilon,
            cov_search=GeometricSearch(start=1.0),
            max_attempts=max_attempts,
            outdir=outdir,
            new_rewards=new_rewards,
            get_alignment=get_alignment,
        )
    else:
        search.new_rewards = new_rewards
        search.get_alignment = get_alignment
    best_test = search.run()
    return best_test.rewards, best_test.alignment, epsilon
def rewards_aligned(
    traj_optimizer: Optional[TrajOptimizer],
    env: Env,
    true_reward: np.ndarray,
    test_rewards: np.ndarray,
    epsilon: float,
    parallel: Optional[Parallel] = None,
    n_test_states: Optional[int] = None,
    n_questions: int = 100000,
    use_equiv: bool = False,
) -> np.ndarray:
    """ Determines the epsilon-alignment of a set of test rewards relative to a critic and epsilon.

    Returns a boolean array, one entry per test reward.  The ground-truth
    question test is prone to false positives (but its negatives are true
    negatives), so when a trajectory optimizer is supplied its labels are
    intersected with the question-test labels.
    """
    # This test can produce both false positives and false negatives
    # This test is prone to false positives, but a negative is always a true negative
    gt_test = make_gt_test_align(test_rewards, n_questions, true_reward, epsilon, use_equiv)
    if traj_optimizer is not None:
        traj_opt_alignment = make_traj_opt_align(
            traj_optimizer, env, true_reward, test_rewards, epsilon, parallel, n_test_states
        )
        # Start with traj opt alignment, then mask out all of the rewards that failed the gt test
        # x y z
        # 0 0 0
        # 0 1 0 don't trust y when it says something is aligned if you failed the traj opt
        # 1 0 0 if y says it's misaligned, then it is
        # 1 1 1
        # This is just the & function
        alignment = traj_opt_alignment & gt_test
        # Count rewards the critic labelled aligned but the gt test rejected.
        # (The original computed gt_test & ~gt_test, which is identically
        # zero, so the logged count was always 0.)
        n_masked = np.sum(traj_opt_alignment & np.logical_not(gt_test))
        logging.info(
            f"Trajectory optimization labelling produced at least {n_masked} false positives"
        )
    else:
        alignment = gt_test
    return alignment
def make_gt_test_align(
    test_rewards: np.ndarray,
    n_questions: int,
    true_reward: np.ndarray,
    epsilon: float,
    use_equiv: bool = False,
) -> np.ndarray:
    """Label each test reward as aligned iff it agrees with the true reward
    on every random question whose ground-truth value gap exceeds epsilon.

    Questions are sampled at random in the Driver environment, converted to
    halfspace normals, and oriented so the true reward's preferred side is
    positive; a reward passes when it lies strictly inside every halfspace.
    """
    env = Driver()
    questions = make_random_questions(n_questions, env)
    _, normals = make_normals(questions, env, use_equiv)
    value_diff = true_reward @ normals.T
    # Keep only questions the true reward answers decisively.
    decisive = np.abs(value_diff) > epsilon
    oriented = orient_normals(normals[decisive], value_diff[decisive] > 0, use_equiv)
    alignment = cast(np.ndarray, np.all(test_rewards @ oriented.T > 0, axis=1))
    assert alignment.shape == (
        test_rewards.shape[0],
    ), f"alignment shape={alignment.shape} is not expected {test_rewards.shape[0]}"
    return alignment
def make_traj_opt_align(
    traj_optimizer: TrajOptimizer,
    env: Env,
    true_reward: np.ndarray,
    test_rewards: np.ndarray,
    epsilon: float,
    parallel: Optional[Parallel] = None,
    n_test_states: Optional[int] = None,
) -> np.ndarray:
    """Label each test reward as aligned iff, at every test state, the plan
    optimized for it achieves a true-reward return within epsilon of the plan
    optimized for the true reward itself.

    Returns a boolean array of shape (len(test_rewards),).
    """
    state_shape = env.observation_space.sample().shape
    action_shape = env.action_space.sample().shape
    if n_test_states is not None:
        # Evaluate at several randomly sampled (flattened) start states.
        raw_states = np.array(
            [
                flatten(env.observation_space, env.observation_space.sample())
                for _ in range(n_test_states)
            ]
        )
    else:
        # Default to the environment's current state only.
        n_test_states = 1
        raw_states = np.array([env.state])
    assert raw_states.shape == (n_test_states, *state_shape)
    # Plan once under the true reward (memoized) to get the optimal baseline.
    opt_plans = make_plans(
        true_reward.reshape(1, 4),
        raw_states,
        traj_optimizer,
        parallel,
        action_shape,
        memorize=True,
    )
    assert opt_plans.shape == (
        1,
        n_test_states,
        50,
        *action_shape,
    ), f"opt_plans shape={opt_plans.shape} is not expected {(1,n_test_states,50,*action_shape)}"
    opt_values: np.ndarray = rollout_plans(env, opt_plans, raw_states)
    # Plan under each candidate test reward, then evaluate all plans on the
    # true-reward environment.
    plans = make_plans(test_rewards, raw_states, traj_optimizer, parallel, action_shape)
    assert plans.shape == (
        len(test_rewards),
        n_test_states,
        50,
        *action_shape,
    ), f"plans shape={plans.shape} is not expected {(len(test_rewards),n_test_states,50,*action_shape)}"
    values = rollout_plans(env, plans, raw_states)
    assert values.shape == (
        len(test_rewards),
        n_test_states,
    ), f"Values shape={values.shape} is not expected {(len(test_rewards), n_test_states)}"
    # Aligned iff the regret vs. the optimal plan is below epsilon everywhere.
    alignment = cast(np.ndarray, np.all(opt_values - values < epsilon, axis=1))
    return alignment
def rollout_plans(env: LegacyEnv, plans: np.ndarray, states: np.ndarray):
returns = | np.empty((plans.shape[0], plans.shape[1])) | numpy.empty |
"""
Procedures for fitting marginal regression models to dependent data
using Generalized Estimating Equations.
References
----------
<NAME> and <NAME>. "Longitudinal data analysis using
generalized linear models". Biometrika (1986) 73 (1): 13-22.
<NAME> and <NAME>. "Longitudinal Data Analysis for Discrete and
Continuous Outcomes". Biometrics Vol. 42, No. 1 (Mar., 1986),
pp. 121-130
<NAME> and <NAME> (1990). "Hypothesis testing of regression
parameters in semiparametric generalized linear models for cluster
correlated data", Biometrika, 77, 485-497.
<NAME> and <NAME> (2002). "Small sample performance of the score
test in GEE".
http://www.sph.umn.edu/faculty1/wp-content/uploads/2012/11/rr2002-013.pdf
<NAME>, <NAME> (2001). A covariance estimator for GEE with
improved small-sample properties. Biometrics. 2001 Mar;57(1):126-34.
"""
from statsmodels.compat.python import iterkeys, range, lrange, lzip, zip
import numpy as np
from scipy import stats
import pandas as pd
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.base.model as base
from statsmodels.genmod import families
from statsmodels.genmod import dependence_structures
from statsmodels.genmod.dependence_structures import CovStruct
import statsmodels.genmod.families.varfuncs as varfuncs
from statsmodels.genmod.families.links import Link
from statsmodels.genmod.families import Family
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
import warnings
# Workaround for block_diag, not available until scipy version
# 0.11. When the statsmodels scipy dependency moves to version 0.11,
# we can remove this function and use:
# from scipy.sparse import block_diag
def block_diag(dblocks, format=None):
    """Assemble sparse blocks into a block-diagonal sparse matrix.

    Stand-in for scipy.sparse.block_diag, which only exists from scipy 0.11
    onward; delete once the scipy dependency moves past that version.
    """
    from scipy.sparse import bmat
    n = len(dblocks)
    # Row i holds block i on the diagonal and None elsewhere.
    layout = [[blk if j == i else None for j in range(n)]
              for i, blk in enumerate(dblocks)]
    return bmat(layout, format)
class ParameterConstraint(object):
    """
    Manages a set of linear equality constraints ``lhs * param = rhs`` on a
    parameter vector by rotating the parameter space so the constraint is
    absorbed into a fixed offset plus a reduced set of free coordinates.
    """

    def __init__(self, lhs, rhs, exog):
        """
        Parameters:
        ----------
        lhs : ndarray
            A q x p matrix which is the left hand side of the
            constraint lhs * param = rhs. The number of constraints is
            q >= 1 and p is the dimension of the parameter vector.
        rhs : ndarray
            A 1-dimensional vector of length q which is the right hand
            side of the constraint equation.
        exog : ndarray
            The n x p exognenous data for the full model.
        """
        # Accept row or column vectors for rhs (patsy linear constraints
        # pass a column vector).
        rhs = np.atleast_1d(rhs.squeeze())
        if rhs.ndim > 1:
            raise ValueError("The right hand side of the constraint "
                             "must be a vector.")
        if len(rhs) != lhs.shape[0]:
            raise ValueError("The number of rows of the left hand "
                             "side constraint matrix L must equal "
                             "the length of the right hand side "
                             "constraint vector R.")
        self.lhs = lhs
        self.rhs = rhs
        # SVD of lhs.T: the trailing left singular vectors span the null
        # space of lhs (the free directions), the leading ones span row(lhs).
        # lhsf stacks both into one orthogonal basis of parameter space.
        u, sing_vals, vt = np.linalg.svd(lhs.T, full_matrices=1)
        rank = len(sing_vals)
        self.lhs0 = u[:, rank:]
        self.lhs1 = u[:, :rank]
        self.lhsf = np.hstack((self.lhs0, self.lhs1))
        # One particular solution of the underdetermined system
        # lhs * param = rhs.
        self.param0 = np.dot(self.lhs1, np.dot(vt, self.rhs) / sing_vals)
        self._offset_increment = np.dot(exog, self.param0)
        self.orig_exog = exog
        self.exog_fulltrans = np.dot(exog, self.lhsf)

    def offset_increment(self):
        """
        Returns the vector to add to the model's offset so the fitted values
        account for the particular solution param0.
        """
        return self._offset_increment

    def reduced_exog(self):
        """
        Returns exog projected onto the free (unconstrained) directions; its
        columns span the constrained model space.
        """
        return self.exog_fulltrans[:, :self.lhs0.shape[1]]

    def restore_exog(self):
        """
        Returns the original, untransformed exog matrix.
        """
        return self.orig_exog

    def unpack_param(self, params):
        """
        Maps a reduced-coordinate parameter vector back to full coordinates.
        """
        return self.param0 + np.dot(self.lhs0, params)

    def unpack_cov(self, bcov):
        """
        Maps a reduced-coordinate covariance matrix back to full coordinates.
        """
        return np.dot(self.lhs0, np.dot(bcov, self.lhs0.T))
class GEE(base.Model):
__doc__ = """
Generalized Estimating Equations Models
GEE estimates Generalized Linear Models when the data have a
grouped structure, and the observations are possibly correlated
within groups but not between groups.
Parameters
----------
endog : array-like
1d array of endogenous response values.
exog : array-like
A nobs x k array where `nobs` is the number of
observations and `k` is the number of regressors. An
intercept is not included by default and should be added
by the user. See `statsmodels.tools.add_constant`.
groups : array-like
A 1d array of length `nobs` containing the group labels.
time : array-like
A 2d array of time (or other index) values, used by some
dependence structures to define similarity relationships among
observations within a cluster.
family : family class instance
The default is Gaussian. To specify the binomial
distribution family = sm.family.Binomial(). Each family can
take a link instance as an argument. See
statsmodels.family.family for more information.
cov_struct : CovStruct class instance
The default is Independence. To specify an exchangeable
structure use cov_struct = Exchangeable(). See
statsmodels.genmod.dependence_structures.CovStruct for more
information.
offset : array-like
An offset to be included in the fit. If provided, must be
an array whose length is the number of rows in exog.
dep_data : array-like
Additional data passed to the dependence structure.
constraint : (ndarray, ndarray)
If provided, the constraint is a tuple (L, R) such that the
model parameters are estimated under the constraint L *
param = R, where L is a q x p matrix and R is a
q-dimensional vector. If constraint is provided, a score
test is performed to compare the constrained model to the
unconstrained model.
%(extra_params)s
See Also
--------
statsmodels.families.*
Notes
-----
Only the following combinations make sense for family and link ::
+ ident log logit probit cloglog pow opow nbinom loglog logc
Gaussian | x x x
inv Gaussian | x x x
binomial | x x x x x x x x x
Poission | x x x
neg binomial | x x x x
gamma | x x x
Not all of these link functions are currently available.
Endog and exog are references so that if the data they refer
to are already arrays and these arrays are changed, endog and
exog will change.
The "robust" covariance type is the standard "sandwich estimator"
(e.g. Liang and Zeger (1986)). It is the default here and in most
other packages. The "naive" estimator gives smaller standard
errors, but is only correct if the working correlation structure
is correctly specified. The "bias reduced" estimator of Mancl and
    DeRouen (Biometrics, 2001) reduces the downward bias of the robust
estimator.
""" % {'extra_params': base._missing_param_doc}
fit_history = None
cached_means = None
    def __init__(self, endog, exog, groups, time=None, family=None,
                 cov_struct=None, missing='none', offset=None,
                 dep_data=None, constraint=None):
        # Flags marking a recoded ordinal/nominal response; a freshly
        # constructed GEE is neither (presumably flipped by the
        # setup_ordinal/setup_nominal routines — confirm in full file).
        self.ordinal = False
        self.nominal = False
        # All of the real initialization is delegated to _reset so it
        # can be re-run after the data are restructured.
        self._reset(endog, exog, groups, time=time, family=family,
                    cov_struct=cov_struct, missing=missing,
                    offset=offset, dep_data=dep_data,
                    constraint=constraint)
# All the actions of __init__ should go here
    def _reset(self, endog, exog, groups, time=None, family=None,
               cov_struct=None, missing='none', offset=None,
               dep_data=None, constraint=None):
        """
        Perform all model initialization: validate and store the
        family, dependence structure, offset and constraint, split the
        data into per-cluster lists, and set up the mean-derivative
        callables.  Separated from __init__ so it can be re-run after
        the data are restructured.
        """
        self.missing = missing
        self.dep_data = dep_data
        self.constraint = constraint

        groups = np.array(groups) # in case groups is pandas

        # Pass groups, time, offset, and dep_data so they are
        # processed for missing data along with endog and exog.
        # Calling super creates self.exog, self.endog, etc. as
        # ndarrays and the original exog, endog, etc. are
        # self.data.endog, etc.
        super(GEE, self).__init__(endog, exog, groups=groups,
                                  time=time, offset=offset,
                                  dep_data=dep_data, missing=missing)

        # Handle the family argument; default is Gaussian.
        if family is None:
            family = families.Gaussian()
        else:
            if not issubclass(family.__class__, families.Family):
                raise ValueError("GEE: `family` must be a genmod "
                                 "family instance")
        self.family = family

        # Handle the cov_struct argument; default is Independence.
        if cov_struct is None:
            cov_struct = dependence_structures.Independence()
        else:
            if not issubclass(cov_struct.__class__, CovStruct):
                raise ValueError("GEE: `cov_struct` must be a genmod "
                                 "cov_struct instance")
        self.cov_struct = cov_struct

        # A missing offset becomes an explicit zero vector so later
        # arithmetic (e.g. the constraint increment) works uniformly.
        if offset is None:
            self.offset = np.zeros(self.exog.shape[0],
                                   dtype=np.float64)
        else:
            self.offset = offset

        # Handle the constraint: fold the particular solution into the
        # offset and replace exog by a basis of the reduced space.
        self.constraint = None
        if constraint is not None:
            if len(constraint) != 2:
                raise ValueError("GEE: `constraint` must be a 2-tuple.")
            if constraint[0].shape[1] != self.exog.shape[1]:
                raise ValueError("GEE: the left hand side of the "
                                 "constraint must have the same number of columns "
                                 "as the exog matrix.")
            self.constraint = ParameterConstraint(constraint[0],
                                                  constraint[1],
                                                  self.exog)

            # NOTE: order matters — the increment must be added before
            # exog is replaced with the reduced design matrix.
            self.offset += self.constraint.offset_increment()
            self.exog = self.constraint.reduced_exog()

        # Convert the data to the internal representation, which is a
        # list of arrays, corresponding to the clusters.
        group_labels = sorted(set(groups))
        group_indices = dict((s, []) for s in group_labels)
        for i in range(len(self.endog)):
            group_indices[groups[i]].append(i)
        for k in iterkeys(group_indices):
            group_indices[k] = np.asarray(group_indices[k])
        self.group_indices = group_indices
        self.group_labels = group_labels

        self.endog_li = self.cluster_list(self.endog)
        self.exog_li = self.cluster_list(self.exog)

        self.num_group = len(self.endog_li)

        # Time defaults to a 1d grid with equal spacing
        if self.time is not None:
            self.time = np.asarray(self.time, np.float64)
            if self.time.ndim == 1:
                self.time = self.time[:,None]
            self.time_li = self.cluster_list(self.time)
        else:
            self.time_li = \
                [np.arange(len(y), dtype=np.float64)[:, None]
                 for y in self.endog_li]
            self.time = np.concatenate(self.time_li)

        self.offset_li = self.cluster_list(self.offset)
        if constraint is not None:
            self.constraint.exog_fulltrans_li = \
                self.cluster_list(self.constraint.exog_fulltrans)

        self.family = family

        # The dependence structure needs the clustered data above, so
        # it is initialized last.
        self.cov_struct.initialize(self)

        # Total sample size
        group_ns = [len(y) for y in self.endog_li]
        self.nobs = sum(group_ns)

        # mean_deriv is the derivative of E[endog|exog] with respect
        # to params
        try:
            # This custom mean_deriv is currently only used for the
            # multinomial logit model
            self.mean_deriv = self.family.link.mean_deriv
        except AttributeError:
            # Otherwise it can be obtained easily from inverse_deriv
            mean_deriv_lpr = self.family.link.inverse_deriv

            def mean_deriv(exog, lpr):
                dmat = exog * mean_deriv_lpr(lpr)[:, None]
                return dmat

            self.mean_deriv = mean_deriv

        # mean_deriv_exog is the derivative of E[endog|exog] with
        # respect to exog
        try:
            # This custom mean_deriv_exog is currently only used for
            # the multinomial logit model
            self.mean_deriv_exog = self.family.link.mean_deriv_exog
        except AttributeError:
            # Otherwise it can be obtained easily from inverse_deriv
            mean_deriv_lpr = self.family.link.inverse_deriv

            def mean_deriv_exog(exog, params):
                lpr = np.dot(exog, params)
                dmat = np.outer(mean_deriv_lpr(lpr), params)
                return dmat

            self.mean_deriv_exog = mean_deriv_exog

        # Skip the covariance updates if all groups have a single
        # observation (reduces to fitting a GLM).
        self._do_cov_update = True
        if max([len(x) for x in self.endog_li]) == 1:
            self._do_cov_update = False
def cluster_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
cluster structure.
"""
if array.ndim == 1:
return [np.array(array[self.group_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.group_indices[k], :])
for k in self.group_labels]
def estimate_scale(self):
"""
Returns an estimate of the scale parameter `phi` at the
current parameter value.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
cached_means = self.cached_means
nobs = self.nobs
exog_dim = exog[0].shape[1]
varfunc = self.family.variance
scale = 0.
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
expval, _ = cached_means[i]
sdev = np.sqrt(varfunc(expval))
resid = (endog[i] - offset[i] - expval) / sdev
scale += np.sum(resid**2)
scale /= (nobs - exog_dim)
return scale
    def _update_mean_params(self):
        """
        Compute one Gauss-Seidel update of the mean structure
        parameters by solving the GEE score equations.

        Returns
        -------
        update : array-like
            The update vector such that params + update is the next
            iterate when solving the score equations.
        score : array-like
            The current value of the score equations, not
            incorporating the scale parameter.  If desired,
            multiply this vector by the scale parameter to
            incorporate the scale.
        """
        endog = self.endog_li
        exog = self.exog_li

        cached_means = self.cached_means

        varfunc = self.family.variance

        # Accumulate bmat = sum_i D_i' V_i^{-1} D_i and
        # score = sum_i D_i' V_i^{-1} r_i over the groups.
        bmat, score = 0, 0
        for i in range(self.num_group):

            expval, lpr = cached_means[i]
            resid = endog[i] - expval

            dmat = self.mean_deriv(exog[i], lpr)
            sdev = np.sqrt(varfunc(expval))

            rslt = self.cov_struct.covariance_matrix_solve(expval, i,
                                                sdev, (dmat, resid))
            # None signals a singular working covariance; propagate.
            if rslt is None:
                return None, None
            vinv_d, vinv_resid = tuple(rslt)

            bmat += np.dot(dmat.T, vinv_d)
            score += np.dot(dmat.T, vinv_resid)

        update = np.linalg.solve(bmat, score)

        return update, score
def update_cached_means(self, mean_params):
"""
cached_means should always contain the most recent calculation
of the group-wise mean vectors. This function should be
called every time the regression parameters are changed, to
keep the cached means up to date.
"""
endog = self.endog_li
exog = self.exog_li
offset = self.offset_li
linkinv = self.family.link.inverse
self.cached_means = []
for i in range(self.num_group):
if len(endog[i]) == 0:
continue
lpr = offset[i] + np.dot(exog[i], mean_params)
expval = linkinv(lpr)
self.cached_means.append((expval, lpr))
    def _covmat(self):
        """
        Returns the sampling covariance matrix of the regression
        parameters and related quantities.

        Returns
        -------
        robust_covariance : array-like
            The robust, or sandwich estimate of the covariance, which
            is meaningful even if the working covariance structure is
            incorrectly specified.
        naive_covariance : array-like
            The model-based estimate of the covariance, which is
            meaningful if the covariance structure is correctly
            specified.
        robust_covariance_bc : array-like
            The "bias corrected" robust covariance of Mancl and
            DeRouen.
        cmat : array-like
            The center matrix of the sandwich expression, used in
            obtaining score test results.
        """
        endog = self.endog_li
        exog = self.exog_li

        varfunc = self.family.variance
        cached_means = self.cached_means

        # Calculate the naive (model-based) and robust (sandwich)
        # covariances.  bmat accumulates the "bread" D'V^{-1}D and
        # cmat the "meat" of the sandwich.
        bmat, cmat = 0, 0
        for i in range(self.num_group):

            expval, lpr = cached_means[i]
            resid = endog[i] - expval

            dmat = self.mean_deriv(exog[i], lpr)
            sdev = np.sqrt(varfunc(expval))

            rslt = self.cov_struct.covariance_matrix_solve(expval, i,
                                                sdev, (dmat, resid))
            # None signals a singular working covariance matrix.
            if rslt is None:
                return None, None, None, None
            vinv_d, vinv_resid = tuple(rslt)

            bmat += np.dot(dmat.T, vinv_d)
            dvinv_resid = np.dot(dmat.T, vinv_resid)
            cmat += np.outer(dvinv_resid, dvinv_resid)

        scale = self.estimate_scale()

        bmati = np.linalg.inv(bmat)
        naive_covariance = bmati * scale
        robust_covariance = np.dot(bmati, np.dot(cmat, bmati))

        # Calculate the bias-corrected sandwich estimate of Mancl and
        # DeRouen (requires naive_covariance so cannot be calculated
        # in the previous loop).
        bcm = 0
        for i in range(self.num_group):

            expval, lpr = cached_means[i]
            resid = endog[i] - expval

            dmat = self.mean_deriv(exog[i], lpr)
            sdev = np.sqrt(varfunc(expval))

            rslt = self.cov_struct.covariance_matrix_solve(expval,
                                                  i, sdev, (dmat,))
            if rslt is None:
                return None, None, None, None
            vinv_d = rslt[0]
            vinv_d /= scale

            # Leverage-type adjustment: solve (I - H_i) a_i = r_i,
            # where H_i acts as a group-specific hat matrix.
            hmat = np.dot(vinv_d, naive_covariance)
            hmat = np.dot(hmat, dmat.T).T

            aresid = np.linalg.solve(np.eye(len(resid)) - hmat, resid)
            rslt = self.cov_struct.covariance_matrix_solve(expval, i,
                                                   sdev, (aresid,))
            if rslt is None:
                return None, None, None, None
            srt = rslt[0]
            srt = np.dot(dmat.T, srt) / scale
            bcm += np.outer(srt, srt)

        robust_covariance_bc = np.dot(naive_covariance,
                                      np.dot(bcm, naive_covariance))

        return (robust_covariance, naive_covariance,
                robust_covariance_bc, cmat)
def predict(self, params, exog=None, offset=None, linear=False):
"""
Return predicted values for a design matrix
Parameters
----------
params : array-like
Parameters / coefficients of a GLM.
exog : array-like, optional
Design / exogenous data. If exog is None, model exog is
used.
offset : array-like, optional
Offset for exog if provided. If offset is None, model
offset is used.
linear : bool
If True, returns the linear predicted values. If False,
returns the value of the inverse of the model's link
function at the linear predicted values.
Returns
-------
An array of fitted values
"""
if exog is None:
exog = self.exog
offset = self.offset
else:
if offset is None:
offset = 0
fitted = offset + np.dot(exog, params)
if not linear:
fitted = self.family.link(fitted)
return fitted
def _starting_params(self):
"""
Returns a starting value for the mean parameters and a list of
variable names.
"""
from statsmodels.genmod.dependence_structures import (
GlobalOddsRatio, Independence)
dm = self.exog.shape[1]
if isinstance(self.cov_struct, GlobalOddsRatio):
ind = Independence()
md = GEE(self.endog, self.exog, self.groups,
time=self.time, family=self.family,
offset=self.offset)
mdf = md.fit()
return mdf.params
else:
return np.zeros(dm, dtype=np.float64)
    def fit(self, maxiter=60, ctol=1e-6, start_params=None,
            params_niter=1, first_dep_update=0,
            covariance_type='robust'):
        """
        Fits a GEE model.

        Parameters
        ----------
        maxiter : integer
            The maximum number of iterations
        ctol : float
            The convergence criterion for stopping the Gauss-Seidel
            iterations
        start_params : array-like
            A vector of starting values for the regression
            coefficients.  If None, a default is chosen.
        params_niter : integer
            The number of Gauss-Seidel updates of the mean structure
            parameters that take place prior to each update of the
            dependence structure.
        first_dep_update : integer
            No dependence structure updates occur before this
            iteration number.
        covariance_type : string
            One of "robust", "naive", or "bias_reduced".

        Returns
        -------
        An instance of the GEEResults class, or None if the fit fails.

        Notes
        -----
        If convergence difficulties occur, increase the values of
        `first_dep_update` and/or `params_niter`.  Setting
        `first_dep_update` to a greater value (e.g. ~10-20) causes the
        algorithm to move close to the GLM solution before attempting
        to identify the dependence structure.

        For the Gaussian family, there is no benefit to setting
        `params_niter` to a value greater than 1, since the mean
        structure parameters converge in one step.
        """
        self.fit_history = {'params': [],
                            'score': [],
                            'dep_params': []}

        if start_params is None:
            mean_params = self._starting_params()
        else:
            mean_params = start_params.copy()

        self.update_cached_means(mean_params)

        # del_params starts negative so the convergence test cannot
        # succeed before at least one update has been computed.
        del_params = -1.
        num_assoc_updates = 0
        for itr in range(maxiter):

            update, score = self._update_mean_params()
            if update is None:
                warnings.warn("Singular matrix encountered in GEE update",
                              ConvergenceWarning)
                break
            mean_params += update
            self.update_cached_means(mean_params)

            # L2 norm of the change in mean structure parameters at
            # this iteration.
            del_params = np.sqrt(np.sum(score**2))

            self.fit_history['params'].append(mean_params.copy())
            self.fit_history['score'].append(score)
            self.fit_history['dep_params'].append(
                self.cov_struct.dep_params)

            # Don't exit until the association parameters have been
            # updated at least once.
            if del_params < ctol and num_assoc_updates > 0:
                break

            # Interleave dependence-structure updates with the mean
            # updates, but not before iteration `first_dep_update`.
            if self._do_cov_update and (itr % params_niter) == 0\
                    and (itr >= first_dep_update):
                self._update_assoc(mean_params)
                num_assoc_updates += 1

        if del_params >= ctol:
            warnings.warn("Iteration limit reached prior to convergence",
                          IterationLimitWarning)

        if mean_params is None:
            warnings.warn("Unable to estimate GEE parameters.",
                          ConvergenceWarning)
            return None

        bcov, ncov, bc_cov, _ = self._covmat()
        if bcov is None:
            warnings.warn("Estimated covariance structure for GEE "
                          "estimates is singular", ConvergenceWarning)
            return None

        if self.constraint is not None:
            mean_params, bcov = self._handle_constraint(mean_params, bcov)
            if mean_params is None:
                warnings.warn("Unable to estimate constrained GEE "
                              "parameters.", ConvergenceWarning)
                return None

        scale = self.estimate_scale()

        # The superclass constructor will multiply the covariance
        # matrix argument bcov by scale, which we don't want, so we
        # divide bcov by the scale parameter here
        results = GEEResults(self, mean_params, bcov / scale, scale)

        results.covariance_type = covariance_type
        results.fit_history = self.fit_history
        results.naive_covariance = ncov
        results.robust_covariance_bc = bc_cov
        results.score_norm = del_params
        results.converged = (del_params < ctol)
        results.cov_struct = self.cov_struct

        return results
    def _handle_constraint(self, mean_params, bcov):
        """
        Expand the parameter estimate `mean_params` and covariance matrix
        `bcov` to the coordinate system of the unconstrained model.

        Parameters
        ----------
        mean_params : array-like
            A parameter vector estimate for the reduced model.
        bcov : array-like
            The covariance matrix of mean_params.

        Returns
        -------
        mean_params : array-like
            The input parameter vector mean_params, expanded to the
            coordinate system of the full model
        bcov : array-like
            The input covariance matrix bcov, expanded to the
            coordinate system of the full model

        Notes
        -----
        As a side effect, a score test of the constraint is computed
        and stored in `self.score_test_results`.
        """
        # The number of variables in the full model
        red_p = len(mean_params)
        full_p = self.constraint.lhs.shape[1]
        # Embed the reduced estimate into the full coordinate system,
        # zero-padding the constrained directions.
        mean_params0 = np.r_[mean_params, np.zeros(full_p - red_p)]

        # Get the score vector under the full model.  The model state
        # is temporarily swapped to the full (transformed) design and
        # restored at the end.
        save_exog_li = self.exog_li
        self.exog_li = self.constraint.exog_fulltrans_li
        import copy
        save_cached_means = copy.deepcopy(self.cached_means)
        self.update_cached_means(mean_params0)
        _, score = self._update_mean_params()

        if score is None:
            warnings.warn("Singular matrix encountered in GEE score test",
                          ConvergenceWarning)
            return None, None

        _, ncov1, _, cmat = self._covmat()
        scale = self.estimate_scale()
        cmat = cmat / scale**2
        # Sub-score corresponding to the constrained directions.
        score2 = score[len(mean_params):] * scale

        # Covariance of the sub-score built from blocks of the bread
        # (amat) and meat (cmat) matrices of the sandwich.
        amat = np.linalg.inv(ncov1)

        bmat_11 = cmat[0:red_p, 0:red_p]
        bmat_22 = cmat[red_p:, red_p:]
        bmat_12 = cmat[0:red_p, red_p:]
        amat_11 = amat[0:red_p, 0:red_p]
        amat_12 = amat[0:red_p, red_p:]

        score_cov = bmat_22 - \
            np.dot(amat_12.T, np.linalg.solve(amat_11, bmat_12))
        score_cov -= np.dot(bmat_12.T,
                            np.linalg.solve(amat_11, amat_12))
        score_cov += np.dot(amat_12.T,
                            np.dot(np.linalg.solve(amat_11, bmat_11),
                                   np.linalg.solve(amat_11, amat_12)))

        # Chi^2 score test of the constraint.
        from scipy.stats.distributions import chi2
        score_statistic = np.dot(score2,
                                 np.linalg.solve(score_cov, score2))
        score_df = len(score2)
        score_pvalue = 1 - chi2.cdf(score_statistic, score_df)
        self.score_test_results = {"statistic": score_statistic,
                                   "df": score_df,
                                   "p-value": score_pvalue}

        # Map the estimate and covariance back to full coordinates and
        # restore the saved model state.
        mean_params = self.constraint.unpack_param(mean_params)
        bcov = self.constraint.unpack_cov(bcov)

        self.exog_li = save_exog_li
        self.cached_means = save_cached_means
        self.exog = self.constraint.restore_exog()

        return mean_params, bcov
    def _update_assoc(self, params):
        """
        Update the association (dependence structure) parameters at
        the current mean parameter estimate `params`.
        """
        self.cov_struct.update(params)
def _derivative_exog(self, params, exog=None, transform='dydx',
dummy_idx=None, count_idx=None):
"""
For computing marginal effects returns dF(XB) / dX where F(.) is
the predicted probabilities
transform can be 'dydx', 'dyex', 'eydx', or 'eyex'.
Not all of these make sense in the presence of discrete regressors,
but checks are done in the results in get_margeff.
"""
#note, this form should be appropriate for
## group 1 probit, logit, logistic, cloglog, heckprob, xtprobit
if exog is None:
exog = self.exog
margeff = self.mean_deriv_exog(exog, params)
# lpr = np.dot(exog, params)
# margeff = (self.mean_deriv(exog, lpr) / exog) * params
# margeff = np.dot(self.pdf(np.dot(exog, params))[:, None],
# params[None,:])
if 'ex' in transform:
margeff *= exog
if 'ey' in transform:
margeff /= self.predict(params, exog)[:, None]
if count_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_count_effects)
margeff = _get_count_effects(margeff, exog, count_idx, transform,
self, params)
if dummy_idx is not None:
from statsmodels.discrete.discrete_margins import (
_get_dummy_effects)
margeff = _get_dummy_effects(margeff, exog, dummy_idx, transform,
self, params)
return margeff
def setup_ordinal(self):
"""
Restructure ordinal data as binary indicators so that they can
be analysed using Generalized Estimating Equations.
"""
self.endog_orig = self.endog.copy()
self.exog_orig = self.exog.copy()
self.groups_orig = self.groups.copy()
self.exog_names_orig = list(self.exog_names)
# The unique outcomes, except the greatest one.
self.endog_values = | np.unique(self.endog) | numpy.unique |
import os, sys
import numpy as np
import pandas as pd
from datetime import datetime as dt,timedelta
import requests
import urllib
import matplotlib.dates as mdates
import matplotlib.colors as mcolors
import matplotlib as mlib
import warnings
def find_var(request, thresh):
    r"""
    Given a request and threshold, returns the variable for plotting. Referenced from ``TrackDataset.gridded_stats()`` and ``TrackPlot.plot_gridded()``. Internal function.

    Parameters
    ----------
    request : str
        Descriptor of the requested plot. Detailed more in the ``TrackDataset.gridded_dataset()`` function.
    thresh : dict
        Dictionary containing thresholds for the plot. Detailed more in the ``TrackDataset.gridded_dataset()`` function.

    Returns
    -------
    thresh : dict
        The thresh dictionary, modified depending on the request.
    varname : str or tuple
        Variable name (or pair of names for vector quantities) to plot.
    """
    # Work on a lowercased copy throughout
    req = request.lower()

    def _hour_window():
        # Collect the digits appearing near or after the word "hour";
        # when "hour" is absent (find() == -1) every digit is used.
        cutoff = req.find('hour') - 4
        digits = [ch for pos, ch in enumerate(req)
                  if ch.isdigit() and pos > cutoff]
        return int(''.join(digits))

    # Count of number of storms
    if 'count' in req or 'num' in req:
        return thresh, 'date' #not sure what date stands for

    # Sustained wind, or change in wind speed over a time interval
    if 'wind' in req or 'vmax' in req:
        if 'change' in req:
            try:
                thresh['dt_window'] = _hour_window()
            except:
                raise RuntimeError("Error: specify time interval (hours)")
            return thresh, 'dvmax_dt'
        return thresh, 'vmax'

    # Minimum MSLP, or change in MSLP over a time interval
    if 'pressure' in req or 'slp' in req:
        if 'change' in req:
            try:
                thresh['dt_window'] = _hour_window()
            except:
                raise RuntimeError("Error: specify time interval (hours)")
            return thresh, 'dmslp_dt'
        return thresh, 'mslp'

    # Storm motion or heading (vector)
    if 'heading' in req or 'movement' in req or 'motion' in req:
        return thresh, ('dx_dt', 'dy_dt')

    # Otherwise, error
    msg = "Error: Could not decipher variable. Please refer to documentation for examples on how to phrase the \"request\" string."
    raise RuntimeError(msg)
def find_func(request, thresh):
    r"""
    Given a request and threshold, returns the requested aggregation function. Referenced from ``TrackDataset.gridded_stats()``. Internal function.

    Parameters
    ----------
    request : str
        Descriptor of the requested plot. Detailed more in the ``TrackDataset.gridded_dataset()`` function.
    thresh : dict
        Dictionary containing thresholds for the plot. Detailed more in the ``TrackDataset.gridded_dataset()`` function.

    Returns
    -------
    thresh : dict
        The thresh dictionary, modified depending on the request.
    func : callable
        Function to apply to the data in each grid bin.
    """
    req = request.lower()

    # Numpy maximum function (request must *start* with "max")
    if req.startswith('max'):
        return thresh, lambda x: np.nanmax(x)

    # Numpy minimum function (request must *start* with "min")
    if req.startswith('min'):
        return thresh, lambda x: np.nanmin(x)

    # Numpy average function
    if 'mean' in req or 'average' in req or 'avg' in req:
        # Averages need at least 5 samples per gridpoint to be stable
        thresh['sample_min'] = max([5, thresh['sample_min']])
        return thresh, lambda x: np.nanmean(x)

    # Numpy percentile function: the digits preceding the word
    # "percentile" give the requested percentile.
    if 'percentile' in req:
        cutoff = req.find('percentile')
        ptile = int(''.join([ch for pos, ch in enumerate(req)
                             if ch.isdigit() and pos < cutoff]))
        thresh['sample_min'] = max([5, thresh['sample_min']])
        return thresh, lambda x: np.nanpercentile(x, ptile)

    # Count function
    if 'count' in req or 'num' in req:
        return thresh, lambda x: len(x)

    # Otherwise, the function cannot be identified
    msg = "Cannot decipher the function. Please refer to documentation for examples on how to phrase the \"request\" string."
    raise RuntimeError(msg)
def construct_title(thresh):
r"""
Construct a plot title for ``TrackDataset.gridded_stats()``. Internal function.
Parameters
----------
thresh : dict
Dictionary containing thresholds for the plot. Detailed more in the ``TrackDataset.gridded_dataset()`` function.
Returns
-------
thresh : dict
Returns the thresh dictionary, modified depending on the threshold(s) specified.
plot_subtitle : str
String denoting the title for the plot.
"""
#List containing entry for plot title, later merged into a string
plot_subtitle = []
#Symbols for greater/less than or equal to signs
gteq = u"\u2265"
lteq = u"\u2264"
#Add sample minimum
if not np.isnan(thresh['sample_min']):
plot_subtitle.append(f"{gteq} {thresh['sample_min']} storms/bin")
else:
thresh['sample_min'] = 0
#Add minimum wind speed
if not | np.isnan(thresh['v_min']) | numpy.isnan |
import sklearn as sk
import numpy as np
import h5py
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier
from sklearn.model_selection import GridSearchCV
# import xgboost as xgb
# from xgboost.sklearn import XGBClassifier
from sklearn.metrics import classification_report
from sklearn.svm import SVC
from sklearn.metrics import f1_score, make_scorer, precision_score
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
from utilities import calculate_accuracy, calculate_confusion_matrix, plot_confusion_matrix2, print_accuracy
import config as cfg
def prepare_data(datatype):
    """Load the truncated HPSS features and integer labels for a split.

    Parameters
    ----------
    datatype : str
        Either 'train' or 'validate'.

    Returns
    -------
    tuple of (features, labels) where labels are class indices obtained
    via argmax over the one-hot targets; returns None implicitly for an
    unrecognized `datatype`.
    """
    workspace = os.path.join(os.path.expanduser('~'),
                             "Downloads/dcase2018_task1-master")
    truncation_dir = os.path.join(workspace, 'features', 'truncation',
                                  'holdout_fold={}'.format(1))

    split_files = {'train': 'train_hpss_l+r_9100.h5',
                   'validate': 'validate_hpss_l+r_9100.h5'}
    if datatype not in split_files:
        return None

    hf = h5py.File(os.path.join(truncation_dir, split_files[datatype]), 'r')
    features = hf['feature'][:]
    targets = hf['target'][:]
    return features, np.argmax(targets, axis=-1)
def model_validate(classifier, class_wise_accuracy=False, plot_confusion_matrix=False):
x_val, y_val = prepare_data(datatype='validate')
if class_wise_accuracy:
predict = classifier.predict(x_val)
if plot_confusion_matrix:
cm = calculate_confusion_matrix(y_val, predict, 10)
plot_confusion_matrix2(cm, "svm", cfg.labels)
class_wise_accuracy = calculate_accuracy(y_val, predict, 10)
print_accuracy(class_wise_accuracy, cfg.labels)
score = | np.mean(class_wise_accuracy) | numpy.mean |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
| Featurizers for pretrain-gnn.
| Adapted from https://github.com/snap-stanford/pretrain-gnns/tree/master/chem/utils.py
"""
import numpy as np
import networkx as nx
import pgl
from rdkit.Chem import AllChem
from pahelix.featurizers.featurizer import Featurizer
from pahelix.utils.compound_tools import mol_to_graph_data
__all__ = [
'PreGNNAttrMaskFeaturizer',
'PreGNNSupervisedFeaturizer',
'PreGNNContextPredFeaturizer',
]
class PreGNNAttrMaskFeaturizer(Featurizer):
    """Featurizer for the attribute-mask pretraining task of pretrain-gnns.

    Randomly masks a fraction of the atoms in each batch and records
    their original atom types as the prediction targets.
    """
    def __init__(self, graph_wrapper, atom_type_num=None, mask_ratio=None):
        super(PreGNNAttrMaskFeaturizer, self).__init__()
        # graph_wrapper: pgl graph wrapper used to build the feed dict.
        self.graph_wrapper = graph_wrapper
        # atom_type_num: atom-type index used as the mask token.
        self.atom_type_num = atom_type_num
        # mask_ratio: fraction of nodes to mask per batch.
        self.mask_ratio = mask_ratio

    def gen_features(self, raw_data):
        """Convert smiles into graph data.

        Returns:
            data(dict): a dict of numpy ndarray consists of graph features,
            or None when rdkit fails to parse the smiles string.
        """
        smiles = raw_data['smiles']
        mol = AllChem.MolFromSmiles(smiles)
        if mol is None:
            return None
        data = mol_to_graph_data(mol)
        data['smiles'] = smiles
        return data

    def collate_fn(self, batch_data_list):
        """Aggregate a list of graph data into a batch data"""
        g_list = []
        for data in batch_data_list:
            g = pgl.graph.Graph(num_nodes = len(data['atom_type']),
                    edges = data['edges'],
                    node_feat = {
                        'atom_type': data['atom_type'].reshape([-1, 1]),
                        'chirality_tag': data['chirality_tag'].reshape([-1, 1]),
                    },
                    edge_feat ={
                        'bond_type': data['bond_type'].reshape([-1, 1]),
                        'bond_direction': data['bond_direction'].reshape([-1, 1]),
                    })
            g_list.append(g)
        join_graph = pgl.graph.MultiGraph(g_list)

        ### mask atom
        # Nodes are sampled over the joined batch graph, so the masked
        # fraction applies across the whole batch (uses np.random, i.e.
        # the global numpy RNG).
        num_node = len(join_graph.node_feat['atom_type'])
        masked_size = int(num_node * self.mask_ratio)
        masked_node_indice = np.random.choice(range(num_node), size=masked_size, replace=False)
        # Capture the true labels BEFORE overwriting the node features.
        masked_node_labels = join_graph.node_feat['atom_type'][masked_node_indice]

        # Replace masked atoms with the mask token and zero out their
        # chirality tags (in-place mutation of the joined graph).
        join_graph.node_feat['atom_type'][masked_node_indice] = self.atom_type_num
        join_graph.node_feat['chirality_tag'][masked_node_indice] = 0

        feed_dict = self.graph_wrapper.to_feed(join_graph)
        feed_dict['masked_node_indice'] = np.reshape(masked_node_indice, [-1, 1]).astype('int64')
        feed_dict['masked_node_label'] = np.reshape(masked_node_labels, [-1, 1]).astype('int64')
        return feed_dict
class PreGNNSupervisedFeaturizer(Featurizer):
    """Featurizer for the supervised pretraining task of pretrain-gnns."""
    def __init__(self, graph_wrapper):
        super(PreGNNSupervisedFeaturizer, self).__init__()
        # pgl graph wrapper used to build the feed dict.
        self.graph_wrapper = graph_wrapper

    def gen_features(self, raw_data):
        """Convert a smiles string and its label into graph data.

        Returns:
            data(dict): a dict of numpy ndarray consists of graph features,
            or None when rdkit fails to parse the smiles string.
        """
        smiles, label = raw_data['smiles'], raw_data['label']
        mol = AllChem.MolFromSmiles(smiles)
        if mol is None:
            return None
        data = mol_to_graph_data(mol)
        data['label'] = label.reshape([-1])
        data['smiles'] = smiles
        return data

    def collate_fn(self, batch_data_list):
        """Aggregate a list of graph data into a batch feed dict."""
        graphs = []
        labels = []
        for item in batch_data_list:
            graphs.append(pgl.graph.Graph(
                num_nodes=len(item['atom_type']),
                edges=item['edges'],
                node_feat={
                    'atom_type': item['atom_type'].reshape([-1, 1]),
                    'chirality_tag': item['chirality_tag'].reshape([-1, 1]),
                },
                edge_feat={
                    'bond_type': item['bond_type'].reshape([-1, 1]),
                    'bond_direction': item['bond_direction'].reshape([-1, 1]),
                }))
            labels.append(item['label'])
        batch_graph = pgl.graph.MultiGraph(graphs)
        feed_dict = self.graph_wrapper.to_feed(batch_graph)

        # Map labels from {-1, +1} to {0, 1}; an original value of 0
        # becomes 0.5, which marks a missing label and is excluded via
        # the `valid` mask.
        label_arr = ((np.array(labels) + 1.0) / 2).astype('float32')
        feed_dict['supervised_label'] = label_arr
        feed_dict['valid'] = (label_arr != 0.5).astype("float32")
        return feed_dict
def reset_idxes(G):
    """
    Resets node indices such that they are numbered from 0 to num_nodes - 1.
    Args:
        G: network x object.
    Returns:
        new_G: copy of G with relabelled node indices.
        mapping: dict from old node index to new (0-based, enumeration order).
    """
    mapping = {old_idx: new_idx for new_idx, old_idx in enumerate(G.nodes())}
    new_G = nx.relabel_nodes(G, mapping, copy=True)
    return new_G, mapping
def graph_data_obj_to_nx_simple(data):
    """
    Converts graph data object into a network x data object.
    NB: Uses simplified atom and bond features, and represent as indices.
    NB: possible issues with recapitulating relative stereochemistry since the edges in the nx object are unordered.
    Args:
        data(dict): a dict of numpy ndarray consists of graph features.
            Expected keys: 'atom_type', 'chirality_tag', 'edges',
            'bond_type', 'bond_direction'.
    Returns:
        G: a network x object
    """
    G = nx.Graph()
    # atoms
    # atom_features = data['node_feat']
    num_atoms = data['atom_type'].shape[0]
    for i in range(num_atoms):
        # atomic_num_idx, chirality_tag_idx = atom_features[i]
        G.add_node(i,
                   atom_num_idx=data['atom_type'][i],
                   chirality_tag_idx=data['chirality_tag'][i])
    # bonds
    edge_index = data['edges']
    # edge_attr = data['edge_feat']
    num_bonds = edge_index.shape[0]
    # Step by 2: edges are assumed to be stored as consecutive (i, j), (j, i)
    # pairs, so only every other entry is needed for an undirected nx graph.
    # NOTE(review): this pairing convention is not visible here — confirm it
    # against the producer (e.g. mol_to_graph_data / nx_to_graph_data_obj_simple).
    for j in range(0, num_bonds, 2):
        begin_idx = int(edge_index[j, 0])
        end_idx = int(edge_index[j, 1])
        # bond_type_idx, bond_dir_idx = edge_attr[j]
        # has_edge guard: nx.Graph is simple, avoid clobbering attributes of
        # an already-inserted bond.
        if not G.has_edge(begin_idx, end_idx):
            G.add_edge(begin_idx, end_idx,
                       bond_type_idx=data['bond_type'][j],
                       bond_dir_idx=data['bond_direction'][j])
    return G
def nx_to_graph_data_obj_simple(G):
"""
Converts nx graph to graph data. Assume node indices are numbered from
0 to num_nodes - 1.
NB: Uses simplified atom and bond features, and represent as indices.
NB: possible issues with recapitulating relative stereochemistry since the edges in the nx object are unordered.
Args:
G: nx graph object
Returns:
data(dict): a dict of numpy ndarray consists of graph features.
"""
# atoms
atom_types = []
chirality_tags = []
for _, node in G.nodes(data=True):
atom_types.append(node['atom_num_idx'])
chirality_tags.append(node['chirality_tag_idx'])
atom_types = np.array(atom_types)
chirality_tags = np.array(chirality_tags)
# bonds
# num_bond_features = 2 # bond type, bond direction
if len(G.edges()) > 0: # mol has bonds
edges = []
bond_types = []
bond_directions = []
for i, j, edge in G.edges(data=True):
# edge_feature = [edge['bond_type_idx'], edge['bond_dir_idx']]
edges.append((i, j))
bond_types.append(edge['bond_type_idx'])
bond_directions.append(edge['bond_dir_idx'])
edges.append((j, i))
bond_types.append(edge['bond_type_idx'])
bond_directions.append(edge['bond_dir_idx'])
edges = np.array(edges)
bond_types = np.array(bond_types)
bond_directions = np.array(bond_directions)
else: # mol has no bonds
edges = np.zeros((0, 2)).astype("int")
bond_types = | np.zeros((0,)) | numpy.zeros |
import numpy as np
import pandas as pd
def batch_df2batch(df, evaluate_ids=(), n_obs=-1, tform=np.eye(3), is_vehicles_evaluated=False):
    """
    Convert dataframe to SGAN input
    :param df: dataframe with columns 'agent_id', 'agent_type', 't', 'x', 'y'
    :param evaluate_ids:
    :param n_obs: number of timesteps observed
        NOTE(review): the default of -1 is not usable — np.zeros((-1, ...))
        raises ValueError, so callers must always pass n_obs explicitly.
    :param tform: (3, 3) transformation matrix applied to the coordinates
    :param is_vehicles_evaluated: include all agents if True, else only
        agents with agent_type == 0 (pedestrians)
    :return: (abs_xy, rel_xy, seq_start_end) where abs/rel_xy have shape
        (n_obs, n_agents, 2)
    """
    if is_vehicles_evaluated:
        agent_ids = np.unique(df['agent_id'])
    else:
        agent_ids = np.unique(df[df['agent_type'] == 0]['agent_id']) # peds only
    # input transform
    df = tform_df(df, tform)
    # assume min t is the start
    # Timestep stride is inferred from the first two distinct t values,
    # i.e. the time column is assumed to be uniformly sampled.
    t_inds = np.unique(np.sort(df['t']))
    t0 = t_inds[0]
    skip = t_inds[1] - t_inds[0]
    abs_xy = np.zeros((n_obs, agent_ids.size, 2), dtype=np.float32)
    rel_xy = np.zeros_like(abs_xy)
    for i, agent_id in enumerate(agent_ids):
        for step, t in enumerate(range(t0, t0+n_obs*skip, skip)):
            xy = df[(df['agent_id'] == agent_id) & (df['t'] == t)][['x', 'y']]
            if xy.size > 0:
                abs_xy[step, i, :] = xy.values[0]
            else:
                # Agent absent at this timestep — mark NaN for now.
                abs_xy[step, i, :] = np.nan
        # for relative, 1st entry is 0,0, rest are the differences
        rel_xy[1:, i, :] = abs_xy[1:, i, :] - abs_xy[:-1, i, :]
    # handle observations w/zeros
    # NaNs from missing observations are zero-filled (both the absolute
    # positions and any displacement that touched a missing step).
    abs_xy[np.isnan(abs_xy)] = 0.
    rel_xy[np.isnan(rel_xy)] = 0.
    # Single sequence covering all agents in this batch.
    seq_start_end = [(0, agent_ids.size)]
    return abs_xy, rel_xy, seq_start_end
def raw_pred2df(pred_list, evaluate_ids, evaluate_inds, tform=np.eye(3)):
"""
:param pred_list: [i] = n_preds, n_peds, 2 | list of sampled predictions
- n_preds = number of timesteps predicted into future
:param evaluate_ids: list of agent ids
:param evaluate_inds: [i] = index of agent_id=evaluate_ids[i] in prediction
:param tform: (3,3) | transformation matrix
:return:
"""
merged_peds = np.stack(pred_list, axis=-1) # (n_preds, n_peds, 2, n_samples)
n_preds = merged_peds.shape[0]
n_samples = merged_peds.shape[3]
cols = ['t', 'agent_id', 'x', 'y', 'sample_id', 'p']
INT_COLUMNS = [cols[i] for i in [0, 1, -2]]
data = []
for ind, id in zip(evaluate_inds, evaluate_ids):
for t in range(n_preds):
z = | np.zeros((n_samples, 1)) | numpy.zeros |
"""
Mnist Main agent, as mentioned in the tutorial
"""
import numpy as np
from tqdm import tqdm
import shutil
import random
import torch
from torch import nn
from torch.backends import cudnn
import torch.optim as optim
from agents.base import BaseAgent
from graphs.models.custom_unet import EEG_CNN, EEG_CNN_2, EEG_CNN_3, EEG_CNN_4, EEG_CNN_5, EEG_CNN_6, EEG_UCnet_1, EEG_UCnet_2
from utils.misc import print_cuda_statistics
import copy
import time
from sklearn.metrics import f1_score, cohen_kappa_score, roc_auc_score, confusion_matrix
import matplotlib.pyplot as plt
from datasets.sleepset import SleepDataLoader
# from pynvml import *
# from utils.lr_finders.lr_finder_eeg import LRFinder
cudnn.benchmark = True
class Sleep_Agent_Init_Train_EEG_UC(BaseAgent):
    def __init__(self, config):
        """Build the sleep-staging agent: seed all RNGs for reproducibility,
        construct the data loader, U-C network, Adam optimizer, cyclic LR
        scheduler, class-weighted CE + MSE losses, and the log tensors.

        :param config: experiment configuration object (seeds, lr, devices,
            max_epoch, etc.) — consumed via self.config set by BaseAgent.
        """
        super().__init__(config)
        # Determinism: disable cuDNN autotuning/non-deterministic kernels and
        # seed torch / CUDA / numpy / python RNGs from a single config seed.
        torch.backends.cudnn.enabled = False
        torch.manual_seed(self.config.seed)
        torch.cuda.manual_seed(self.config.seed)
        torch.cuda.manual_seed_all(self.config.seed) # if you are using multi-GPU.
        np.random.seed(self.config.seed) # Numpy module.
        random.seed(self.config.seed) # Python random module.
        torch.manual_seed(self.config.seed)
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        self.data_loader = SleepDataLoader(config=config)
        # self.model = EEG_Unet_1(1,1)
        self.model = EEG_UCnet_2()
        self.optimizer = optim.Adam(self.model.parameters(), lr=self.config.learning_rate, betas=(self.config.beta1, self.config.beta2), eps = 1e-08,
                                    weight_decay=self.config.weight_decay)
        # self.scheduler = optim.lr_scheduler.CosineAnnealingLR(self.optimizer, self.config.max_epoch)
        self.scheduler = optim.lr_scheduler.CyclicLR(self.optimizer, base_lr=self.config.learning_rate, max_lr=self.config.max_lr, cycle_momentum=False)
        # self.weights = torch.from_numpy(self.data_loader.weights).float()
        # initialize counter
        # Per-class weights from the data loader, used by CrossEntropyLoss.
        self.weights = torch.from_numpy(self.data_loader.weights).float()
        print("Available cuda devices: {}, current device:{}".format(torch. cuda. device_count(),torch.cuda.current_device()))
        # nvmlInit()
        # for i in self.config.gpu_device:
        #     h = nvmlDeviceGetHandleByIndex(i)
        #     info = nvmlDeviceGetMemoryInfo(h)
        #     print(f'total    : {info.total/(1024*1024)}')
        #     print(f'free    : {info.free/(1024*1024)}')
        #     print(f'used     : {info.used/(1024*1024)}')
        torch.cuda.manual_seed(self.config.seed)
        # self.device = "cuda:{}".format(self.config.gpu_device[0])
        # First configured GPU hosts the master copy; DataParallel spreads
        # batches over all configured devices.
        self.device = torch.device(self.config.gpu_device[0])
        self.model = self.model.to(self.device)
        self.model = nn.DataParallel(self.model, device_ids=[torch.device(i) for i in self.config.gpu_device])
        self.best_model = copy.deepcopy(self.model)
        self.loss = nn.CrossEntropyLoss(self.weights.to(self.device))
        self.loss2 = nn.MSELoss()
        self.current_epoch = 0
        # train_logs row layout (see train()): [val_loss, train_loss,
        # train_acc, val_acc, train_f1, val_f1, train_k, val_k, train_loss1,
        # val_loss1, train_loss2, val_loss2].
        self.train_logs = torch.empty((self.config.max_epoch,12)).to(self.device)
        self.best_logs = torch.zeros(5).to(self.device)
        self.test_logs = torch.empty((self.config.max_epoch,7)).to(self.device)
        """
        Code to run the learning rate finder, be careful it might need some changes on dataloaders at the source code.
        lr_finder = LRFinder(self.model, self.optimizer, self.loss, device=self.device)
        lr_finder.range_test(self.data_loader.train_loader, end_lr=100, num_iter=100)
        _, lr = lr_finder.plot() # to inspect the loss-learning rate graph
        lr_finder.reset() # to reset the model and optimizer to their initial state
        """
        self.logger.info("Program will run on *****GPU-CUDA***** ")
        print_cuda_statistics()
    def load_checkpoint(self, file_name):
        """
        Latest checkpoint loader: restores model/best-model/optimizer state,
        the accumulated train/test log tensors, the epoch counter and the
        best-validation record saved by save_checkpoint().
        :param file_name: name of the checkpoint file
        :return:
        """
        print("Loading from file {}".format(file_name))
        checkpoint = torch.load(file_name)
        self.model.load_state_dict(checkpoint["model_state_dict"])
        self.best_model.load_state_dict(checkpoint["best_model_state_dict"])
        self.optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
        # Logs are copied into the front of the pre-allocated tensors so a
        # resumed run can keep appending after the restored rows.
        self.train_logs[0:checkpoint["train_logs"].shape[0],:] = checkpoint["train_logs"]
        self.test_logs[0:checkpoint["test_logs"].shape[0],:] = checkpoint["test_logs"]
        self.current_epoch = checkpoint["epoch"]
        self.best_logs = checkpoint["best_logs"]
        print("Model has loaded successfully")
def save_checkpoint(self, file_name="checkpoint.pth.tar"):
"""
Checkpoint saver
:param file_name: name of the checkpoint file
:param is_best: boolean flag to indicate whether current checkpoint's metric is the best so far
:return:
"""
save_dict = {}
savior = {}
savior["model_state_dict"] = self.model.state_dict()
savior["best_model_state_dict"] = self.best_model.state_dict()
savior["optimizer_state_dict"] = self.optimizer.state_dict()
savior["train_logs"] = self.train_logs
savior["test_logs"] = self.test_logs
savior["epoch"] = self.current_epoch
savior["best_logs"] = self.best_logs
save_dict.update(savior)
try:
torch.save(save_dict, file_name)
print("Models has saved successfully")
except:
raise Exception("Problem in model saving")
def save_encoder(self, file_name="checkpoint_encoder.pth.tar"):
"""
Checkpoint saver
:param file_name: name of the checkpoint file
:param is_best: boolean flag to indicate whether current checkpoint's metric is the best so far
:return:
"""
save_dict = {}
savior = {}
savior["encoder_state_dict"] = self.best_model.module.encoder.state_dict()
save_dict.update(savior)
try:
torch.save(save_dict, file_name)
print("Models has saved successfully")
except:
raise Exception("Problem in model saving")
def run(self):
"""
The main operator
:return:
"""
try:
if self.config.load_ongoing:
self.load_checkpoint(self.config.save_dir)
print("The best in epoch:", self.best_logs[4].item(), "so far acc:",self.best_logs[1].item()," and f1:",self.best_logs[2].item())
test_loss, test_acc, test_f1, test_k, test_auc, test_conf, test_loss1, test_loss2 = self.test()
print("Test accuracy: {0:.2f}% and f1: {1:.4f}".format(test_acc * 100, test_f1))
print("Test loss1: {0:5f}% and loss2: {1:.5f}".format(test_loss1, test_loss2))
print("Test kappa: {0:.4f}% and auc: {1:.4f}".format(test_k, test_auc))
print("Test confusion matrix:")
print(test_conf)
self.train()
# self.save_encoder()
self.model = self.best_model
self.save_encoder(self.config.save_dir_encoder)
val_loss, val_acc, val_f1, val_k,_,_ = self.validate()
print("Validation accuracy: {0:.2f}% and f1: {1:.4f} and k: {2:.4f}".format(val_acc*100, val_f1, val_k))
test_loss, test_acc, test_f1, test_k, test_auc, test_conf, test_loss1, test_loss2 = self.test()
print("Test accuracy: {0:.2f}% and f1: {1:.4f}".format(test_acc*100,test_f1))
print("Test kappa: {0:.4f}% and auc: {1:.4f}".format(test_k,test_auc))
print("Test confusion matrix:")
print(test_conf)
except KeyboardInterrupt:
self.logger.info("You have entered CTRL+C.. Wait to finalize")
    def train(self):
        """
        Main training loop with early stopping: one train + validation pass
        per epoch; whenever validation loss improves, snapshot the model as
        best, run the test set, and checkpoint. Stops after
        config.n_epochs_stop epochs without improvement (after epoch 5).
        :return:
        """
        print('we are training model normally')
        if not self.config.load_ongoing:
            self.best_model = copy.deepcopy(self.model)
            epochs_no_improve, early_stop = 0, False
            # best_logs layout: [epoch, val_loss, val_acc, val_f1, val_k];
            # initialize the best validation loss to a large sentinel.
            self.best_logs[1] = 1000
        self.test_it = self.current_epoch
        for self.current_epoch in range(self.current_epoch, self.config.max_epoch):
            for param_group in self.optimizer.param_groups:
                lr = param_group['lr']
            print("We have learning rate: {0:.5f}".format(lr))
            start = time.time()
            train_loss, train_acc, train_f1, train_k, train_loss1, train_loss2 = self.train_one_epoch()
            val_loss, val_acc, val_f1, val_k, val_loss1,val_loss2 = self.validate()
            self.train_logs[self.current_epoch] = torch.tensor([val_loss,train_loss,train_acc,val_acc,train_f1,val_f1,train_k,val_k,train_loss1, val_loss1, train_loss2, val_loss2],device=self.device)
            if self.config.validation:
                not_saved = True
                print("Epoch {0:d} Validation loss: {1:.6f}, accuracy: {2:.2f}% f1 :{3:.4f}, k :{4:.4f} Training loss: {5:.6f}, accuracy: {6:.2f}% f1 :{7:.4f}, k :{8:.4f},".format(self.current_epoch, val_loss, val_acc*100, val_f1, val_k, train_loss, train_acc*100, train_f1, train_k))
                # Improvement criterion: strictly lower validation loss.
                if (val_loss < self.best_logs[1].item()):
                    self.best_logs = torch.tensor([self.current_epoch, val_loss, val_acc, val_f1, val_k],device=self.device)
                    print("we have a new best at epoch {0:d} with validation accuracy: {1:.2f}%, f1: {2:.4f} and k: {3:.4f}".format(self.current_epoch, val_acc*100, val_f1, val_k))
                    self.best_model = copy.deepcopy(self.model)
                    test_loss, test_acc, test_f1, test_k, test_auc, test_conf, test_loss1, test_loss2 = self.test()
                    print("Test loss: {0:.6}, accuracy: {1:.2f}% f1 :{2:.4f}, k :{3:.4f}, auc :{4:.4f}".format(test_loss, test_acc*100, test_f1, test_k, test_auc))
                    self.test_logs[self.test_it] = torch.tensor([self.current_epoch,test_loss,test_acc,test_f1,test_k, test_loss1, test_loss2],device=self.device)
                    self.test_it+=1
                    self.save_checkpoint(self.config.save_dir)
                    not_saved = False
                    epochs_no_improve = 0
                else:
                    # No improvement: still log the test metrics for this epoch.
                    test_loss, test_acc, test_f1, test_k, test_auc, test_conf, test_loss1, test_loss2 = self.test()
                    # print("Test loss: {0:.6}, accuracy: {1:.2f}% f1 :{2:.4f}, k :{3:.4f}, auc :{4:.4f}".format(test_loss, test_acc*100, test_f1, test_k, test_auc))
                    self.test_logs[self.test_it] = torch.tensor([self.current_epoch,test_loss,test_acc,test_f1,test_k,test_loss1, test_loss2],device=self.device)
                    self.test_it+=1
                    epochs_no_improve += 1
                # Periodic checkpoint even without an improvement.
                if (self.current_epoch % self.config.save_every == 0 and not_saved):
                    self.save_checkpoint(self.config.save_dir)
                print("This epoch took {} seconds".format(time.time() - start))
                if self.current_epoch > 5 and epochs_no_improve == self.config.n_epochs_stop:
                    print('Early stopping!')
                    early_stop = True
                    break
            else:
                # No validation-based selection: every epoch becomes the "best".
                print("Epoch {0:d} Validation loss: {1:.6f}, accuracy: {2:.2f}% f1 :{3:.4f}, k :{4:.4f} Training loss: {5:.6f}, accuracy: {6:.2f}% f1 :{7:.4f}, k :{8:.4f},".format(self.current_epoch, val_loss, val_acc*100, val_f1, val_k, train_loss, train_acc*100, train_f1, train_k))
                self.best_model = copy.deepcopy(self.model)
                test_loss, test_acc, test_f1, test_k, test_auc, test_conf, test_loss1, test_loss2 = self.test()
                print("Test loss: {0:.6f}, accuracy: {1:.2f}% f1 :{2:.4f}, k :{3:.4f}, auc :{4:.4f}".format(test_loss, test_acc * 100, test_f1, test_k,test_auc))
                self.test_logs[self.test_it] = torch.tensor([self.current_epoch, test_loss, test_acc, test_f1, test_k, test_loss1, test_loss2], device=self.device)
                self.test_it += 1
                self.save_checkpoint(self.config.save_dir)
        if early_stop:
            print("Early Stopping Occurred")
def train_one_epoch(self):
"""
One epoch of training
:return:
"""
self.model.train()
batch_loss, batch_loss1, batch_loss2 = 0,0,0
tts, preds = [], []
for batch_idx, (data, target, _) in tqdm(enumerate(self.data_loader.train_loader),"Training",leave=False, disable=self.config.tdqm): #enumerate(self.data_loader.train_loader):
# self.plot_eeg(data[0].numpy())
view_1, target = data[0].float().to(self.device), target.to(self.device)
self.optimizer.zero_grad()
pred, view = self.model(view_1)
loss1 = self.loss(pred, target)
loss2 = self.loss2(view, view_1)
loss = loss1 + loss2
loss.backward()
batch_loss1 +=loss1.item()
batch_loss2 +=loss2.item()
batch_loss +=loss.item()
tts.append(target)
preds.append(pred)
self.optimizer.step()
self.scheduler.step()
tts = torch.cat(tts).cpu().numpy()
preds = torch.cat(preds).argmax(axis=1).cpu().numpy()
return batch_loss/len(tts), np.equal(tts,preds).sum()/len(tts), f1_score(preds,tts), cohen_kappa_score(preds,tts), batch_loss1/len(tts), batch_loss2/len(tts)
    def validate(self):
        """
        One cycle of model validation over the validation loader, with a
        post-processing step that averages the class scores over consecutive
        windows of config.post_proc_step before the argmax.
        :return: (mean total loss, accuracy, f1, cohen kappa,
                  mean CE loss, mean MSE loss)
        """
        self.model.eval()
        valid_loss, batch_loss1, batch_loss2 = 0,0,0
        tts, preds = [], []
        with torch.no_grad():
            for batch_idx, (data, target, _) in tqdm(enumerate(self.data_loader.valid_loader),"Validation",leave=False, disable=self.config.tdqm): #enumerate(self.data_loader.valid_loader):
                view_1, target = data[0].float().to(self.device), target.to(self.device)
                pred, view = self.model(view_1)
                loss1 = self.loss(pred, target)
                loss2 = self.loss2(view, view_1)
                loss = loss1 + loss2
                batch_loss1 += loss1.item()
                batch_loss2 += loss2.item()
                valid_loss += loss.item()
                tts.append(target)
                preds.append(pred)
        tts = torch.cat(tts).cpu().numpy()
        preds = torch.cat(preds).cpu().numpy()
        # Temporal smoothing: replace each class score by its mean over a
        # block of post_proc_step consecutive windows (in place).
        # NOTE(review): a final partial block is still divided by the full
        # post_proc_step — confirm the loader yields a multiple of it.
        for w_idx in range(0, len(preds), self.config.post_proc_step):
            for n_class in range(len(preds[0])):
                preds[w_idx:w_idx + self.config.post_proc_step, n_class] = preds[ w_idx:w_idx + self.config.post_proc_step, n_class].sum() / self.config.post_proc_step
        preds = preds.argmax(axis=1)
        # NOTE(review): sklearn's convention is f1_score(y_true, y_pred);
        # here predictions are passed first (harmless for binary F1).
        return valid_loss/len(tts), np.equal(tts, preds).sum()/len(tts), f1_score(preds, tts), cohen_kappa_score(tts, preds), batch_loss1/len(tts), batch_loss2/len(tts)
    def test(self):
        """
        One cycle of model evaluation on the test loader, with the same
        window-averaging post-processing as validate().
        :return: (summed total loss, accuracy, f1, cohen kappa, auc,
                  confusion matrix, mean CE loss, mean MSE loss)
        """
        self.model.eval()
        test_loss, batch_loss1, batch_loss2 = 0,0,0
        tts = []
        preds = []
        with torch.no_grad():
            for batch_idx, (data, target, _) in tqdm(enumerate(self.data_loader.test_loader),"Test",leave=False, disable=self.config.tdqm):
                view_1, target = data[0].float().to(self.device), target.to(self.device)
                pred, view = self.model(view_1)
                loss1 = self.loss(pred, target)
                loss2 = self.loss2(view, view_1)
                loss = loss1 + loss2
                batch_loss1 += loss1.item()
                batch_loss2 += loss2.item()
                test_loss += loss.item()
                tts.append(target)
                preds.append(pred)
        tts = torch.cat(tts).cpu().numpy()
        preds = torch.cat(preds).cpu().numpy()
        # Same in-place temporal smoothing of class scores as in validate().
        for w_idx in range(0, len(preds), self.config.post_proc_step):
            for n_class in range(len(preds[0])):
                preds[w_idx:w_idx + self.config.post_proc_step, n_class] = preds[ w_idx:w_idx + self.config.post_proc_step, n_class].sum() / self.config.post_proc_step
        preds = preds.argmax(axis=1)
        test_acc = np.equal(tts, preds).sum() / len(tts)
        # NOTE(review): f1_score is called as (y_pred, y_true) here, and
        # roc_auc_score is fed hard argmax labels, so "auc" is the AUC of the
        # binarized decisions rather than of the class scores — confirm intent.
        test_f1 = f1_score(preds, tts)
        test_k = cohen_kappa_score(tts, preds)
        test_auc = roc_auc_score(tts, preds)
        test_conf = confusion_matrix(tts, preds)
        # NOTE: test_loss is the batch-sum, not divided by len(tts) as the
        # partial losses below are.
        return test_loss, test_acc, test_f1, test_k, test_auc, test_conf, batch_loss1/len(tts), batch_loss2/len(tts)
    def finalize(self):
        """
        Finalizes all the operations of the 2 Main classes of the process, the operator and the data loader:
        prints a closing message and renders the loss/kappa plots
        (plot_losses and plot_k are defined elsewhere in this agent).
        :return:
        """
        # self.save_checkpoint("./data/{}".format(self.config.checkpoint_file),0)
        print("We are in the final state.")
        self.plot_losses()
        self.plot_k()
        # self.save_checkpoint(self.config.save_dir)
        # print("test mse is {}".format(self.test()))
def train_time_shift(self):
"""
One epoch of training
:return:
"""
self.model.train()
batch_loss = 0
tts = []
preds = []
for batch_idx, (data, target, _) in enumerate(self.data_loader.train_loader): #tqdm(enumerate(self.data_loader.train_loader),"Training", leave=False):
# self.plot_eeg(data[0].numpy())
view_1 = data[0].unsqueeze(dim=-1).permute(0,3,1,2).float()
view_1, target = view_1.to(self.device), target.to(self.device)
if (batch_idx==0):
past_view = view_1
elif (batch_idx==1):
self.optimizer.zero_grad()
current_view = view_1
elif (batch_idx==len(self.data_loader.train_loader)-1):
break
else:
self.optimizer.zero_grad()
future_view = view_1
pred = self.model(past_view,current_view,future_view)
loss = self.loss(pred, target)
loss.backward()
batch_loss +=loss
tts.append(target.cpu().numpy())
preds.append(pred.detach().cpu().numpy())
self.optimizer.step()
self.scheduler.step()
past_view = current_view
current_view = future_view
tts = np.array([x for i in tts for x in i])
preds = | np.array([x for i in preds for x in i]) | numpy.array |
"""
Target State Module
Class of thermal states used for our learning tasks.
"""
from itertools import product
from numpy.linalg import norm
from numpy.random import normal as N
from numpy.random import randn
from numpy.random import seed
from qutip.operators import sigmax, sigmay, sigmaz, identity
from qutip.tensor import tensor
def tensor_sigmas(positions, paulis, size):
    """Tensor product acting as the given Pauli operators on `positions`
    and as the 2x2 identity on every other site, e.g. I ⊗ σz ⊗ I for
    positions=[1], size=3.

    NOTE(review): the sequential `insert` places operators at their final
    indices only when `positions` is sorted in ascending order — confirm
    callers respect that.
    """
    Is=[identity(2) for _ in range(size-len(positions))]
    for p, pauli in zip(positions, paulis):
        Is.insert(p, pauli)
    return tensor(Is)
def two_body_ts(sigma_onsite, size, normalize, temp=1):
loop_z=sum([N(scale=sigma_onsite)*tensor_sigmas([i], [sigmaz()], size=size) for i in
range(size)])
loop_y=sum([ | N(scale=sigma_onsite) | numpy.random.normal |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 14:25:06 2018
@author: jeremiasknoblauch
Description: Plot the Air Pollution Data for NIPS submission
"""
import csv
import numpy as np
import scipy
from Evaluation_tool import EvaluationTool
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
import datetime
import matplotlib
#import matplotlib as mpl
#mpl.rcParams.update(mpl.rcParamsDefault)
# Which figure variant(s) to produce.
plot_model_post_only = False
plot_model_post_and_rld_only = True
plot_top = False
plot_bottom = True
"""""STEP 1: DATA TRANSFOMRATIONS"""""
normalize = True
deseasonalize_2h = True
deseasonalize_day = True #only one of the two deseasonalizations should be chosen
shortened, shortened_to = False, 500
daily_avg = True
# Daily averaging supersedes the 2-hourly deseasonalization.
if daily_avg:
    deseasonalize_2h = False
# NOTE(review): absolute, user-specific paths — must be edited per machine.
data_dir = ("//Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//" +
            "//Code//SpatialBOCD//Data//AirPollutionData")
cp_type = "CongestionChargeData"
dist_file_road = (data_dir + "//" + cp_type + "//" +
                  "RoadDistanceMatrix_")
dist_file_euclid = (data_dir + "//" + cp_type + "//" +
                    "EuclideanDistanceMatrix_")
res_path = ("/Users//jeremiasknoblauch//Documents////OxWaSP//BOCPDMS//Code//" +
            "SpatialBOCD//PaperNIPS//AirPollution//")
results_file_DPD = (res_path + "results_DPD.txt")
results_file_KL = (res_path + "results_KL.txt")
frequency = "2h" #2h, daily (=every 15 min),
mode = "bigger" #bigger, smaller (bigger contains more filled-in values)
# Station subsets corresponding to the two dataset variants.
if mode == "bigger":
    stationIDs = ["BT1", "BX1", "BX2", "CR2", "CR4",
                  "EA1", "EA2", "EN1", "GR4", "GR5",
                  "HG1", "HG2", "HI0", "HI1", "HR1",
                  "HS2", "HV1", "HV3", "KC1", "KC2",
                  "LH2", "MY1", "RB3", "RB4", "TD0",
                  "TH1", "TH2", "WA2", "WL1"]
elif mode == "smaller":
    stationIDs = ["BT1", "BX2", "CR2", "EA2", "EN1", "GR4",
                  "GR5", "HG1", "HG2", "HI0", "HR1", "HV1",
                  "HV3", "KC1", "LH2", "RB3", "TD0", "WA2"]
num_stations = len(stationIDs)
"""STEP 1: Read in distances"""
"""STEP 1.1: Read in road distances (as strings)"""
pw_distances_road = []
station_IDs = []   # NOTE(review): initialized here but not filled in this step
count = 0          # NOTE(review): unused in this step
with open(dist_file_road + mode + ".csv") as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        pw_distances_road += row
"""STEP 1.2: Read in euclidean distances (as strings)"""
pw_distances_euclid = []
station_IDs = []
count = 0
with open(dist_file_euclid + mode + ".csv") as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        pw_distances_euclid += row
"""STEP 1.3: Convert both distance lists to floats and matrices"""
# Both CSVs are flattened row-major lists of the same length, so they can be
# converted in one zipped pass and reshaped to (num_stations, num_stations).
pw_d_r, pw_d_e = [], []
for r,e in zip(pw_distances_road, pw_distances_euclid):
    pw_d_r.append(float(r))
    pw_d_e.append(float(e))
pw_distances_road = np.array(pw_d_r).reshape(num_stations, num_stations)
pw_distances_euclid = np.array(pw_d_e).reshape(num_stations, num_stations)
"""STEP 2: Convert distance matrices to nbhs"""
# Neighbourhood rings: stations whose distance falls strictly between two
# consecutive cutoffs. Strict '>' also excludes the station itself (distance 0).
cutoffs = [0.0, 10.0, 20.0, 30.0, 40.0, 100.0]
num_nbhs = len(cutoffs) - 1
"""STEP 2.1: road distances"""
road_nbhs = []
for location in range(0, num_stations):
    location_nbh = []
    for i in range(0, num_nbhs):
        larger_than, smaller_than = cutoffs[i], cutoffs[i+1]
        # Indices of stations inside the open interval (larger_than, smaller_than).
        indices = np.intersect1d(
            np.where(pw_distances_road[location,:] > larger_than),
            np.where(pw_distances_road[location,:] < smaller_than)).tolist()
        location_nbh.append(indices.copy())
    road_nbhs.append(location_nbh.copy())
"""STEP 2.2: euclidean distances"""
euclid_nbhs =[]
for location in range(0, num_stations):
    location_nbh = []
    for i in range(0, num_nbhs):
        larger_than, smaller_than = cutoffs[i], cutoffs[i+1]
        indices = np.intersect1d(
            np.where(pw_distances_euclid[location,:] > larger_than),
            np.where(pw_distances_euclid[location,:] < smaller_than)).tolist()
        location_nbh.append(indices.copy())
    euclid_nbhs.append(location_nbh.copy())
"""STEP 3: Read in station data for each station"""
station_data = []
for id_ in stationIDs:
    file_name = (data_dir + "//" + cp_type + "//" +
                 id_ + "_081702-081703_" + frequency + ".txt")
    """STEP 3.1: Read in raw data"""
    #NOTE: Skip the header
    data_raw = []
    count = 0
    with open(file_name) as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            if count > 0:
                data_raw += row
            count += 1
    """STEP 3.2: Convert to floats"""
    #NOTE: We have row names, so skip every second
    # NOTE(review): despite the comment above, every entry is converted —
    # confirm whether the files actually contain row names at this frequency.
    dat = []
    for entry in data_raw:
        dat += [float(entry)]
    """STEP 3.3: Append to station_data list"""
    station_data.append(dat.copy())
"""STEP 4: Format the station data into a matrix"""
# T timesteps (taken from the first station) x num_stations observations;
# all stations are assumed to have series of equal length.
T, S1, S2 = len(station_data[0]), num_stations, 1
data = np.zeros((T, num_stations))
for i in range(0, num_stations):
    data[:,i] = np.array(station_data[i])
import numpy as np
from scipy.sparse.linalg import cg
from scipy.sparse.linalg import LinearOperator
import sampler
import random
from joblib import Parallel, delayed
class Trainer:
    """Stochastic-reconfiguration trainer for an RBM wavefunction (Carleo &
    Troyer style): samples the wavefunction, estimates the covariance matrix
    S and force vector F, and applies updates -gamma * S^{-1} F via
    conjugate gradients."""
    def __init__(self, h, reg_list=(100, 0.9, 1e-4), cores=1, mag = True):
        self.h = h # Hamiltonian to evaluate wf against
        self.nspins = h.nspins
        self.reg_list = reg_list # Parameters for regularization
        self.step_count = 0
        self.nvar = 0        # number of variational parameters; set per-wf in update_vector
        self.parallel_cores = cores
        self.m = mag         # passed to the sampler as mag0
    def train(self, wf, init_state, batch_size, num_steps, gamma_fun, print_freq=25, file='', out_freq=0):
        """Run `num_steps` SR updates; gamma_fun(step) gives the step size.
        Returns the trained wavefunction and the energy-per-spin history."""
        state = init_state
        elist = np.zeros(num_steps, dtype=complex) # list of energies to evaluate
        for step in range(num_steps):
            # First call the update_vector function to get our set of updates and the new state (so process thermalizes)
            updates, state, elist[step] = self.update_vector(wf, state, batch_size, gamma_fun(step), step)
            # Now apply appropriate parts of the update vector to wavefunction parameters
            self.apply_update(updates, wf)
            if step % print_freq == 0:
                print("Completed training step {}".format(step))
                print("Current energy per spin: {}".format(elist[step]))
            if out_freq > 0 and step % out_freq == 0:
                wf.save_parameters(file + str(step))
        return wf, elist
    def update_vector(self, wf, init_state, batch_size, gamma, step, therm=False): # Get the vector of updates
        self.nvar = self.get_nvar(wf)
        wf.init_lt(init_state)
        samp = sampler.Sampler(wf, self.h, mag0=self.m) # start a sampler
        samp.nflips = self.h.minflips
        samp.state = np.copy(init_state)
        samp.reset_av()
        if therm == True:
            samp.thermalize(batch_size)
        results = Parallel(n_jobs=self.parallel_cores)(
            delayed(get_sample)(samp, self) for i in range(batch_size)) # Pass sampling to parallelization
        # Each sample yields (local energy, derivative vector, state).
        elocals = np.array([i[0] for i in results])
        deriv_vectors = np.array([i[1] for i in results])
        states = np.array([i[2] for i in results])
        # Now that we have all the data from sampling let's run our statistics
        # cov = self.get_covariance(deriv_vectors)
        # Matrix-free S: cov_operator applies the covariance to a vector so
        # cg never needs the dense (nvar x nvar) matrix.
        cov_operator = LinearOperator((self.nvar, self.nvar), dtype=complex,
                                      matvec=lambda v: self.cov_operator(v, deriv_vectors, step))
        forces = self.get_forces(elocals, deriv_vectors)
        # Now we calculate the updates as
        # updates = -gamma * np.dot(np.linalg.pinv(cov), forces)
        vec, info = cg(cov_operator, forces)
        # vec, info = cg(cov, forces)
        updates = -gamma * vec
        self.step_count += batch_size
        return updates, samp.state, np.mean(elocals) / self.nspins
    def get_elocal(self, state, wf): # Function to calculate local energies; see equation A2 in Carleo and Troyer
        if not all(state == wf.state): # make sure wavefunction lookup table is properly initialized
            wf.init_lt(state)
        mel, flips = self.h.find_conn(state) # Get all S' that connect to S via H and their matrix elems
        #for flip in range(len(flips)):
        #    eloc += mel[flip] * wf.pop(state, flips[flip])
        eloc = sum([m*wf.pop(state,f) for m, f in zip(mel, flips)])
        return eloc
    def get_deriv_vector(self, state, wf):
        # The derivative vector is a vector which contains the following elements in one column:
        # First: derivative of psi(S) w.r.t. visible unit biases (wf.Nv of them)
        # Second: the hidden unit biases (wf.Nh of them)
        # Third: The weights (wf.Nh * wf.Nv)
        # See Carleo C3-5 for formulas
        vector = np.zeros(self.nvar, dtype=complex) # initialize
        for bias in range(wf.nv): # visible unit biases
            vector[bias] = state[bias]
        for bias in range(wf.nh): # hidden unit biases
            vector[wf.nv + bias] = np.tanh(wf.Lt[bias])
        for v in range(wf.nv):
            for h in range(wf.nh):
                vector[wf.nh + wf.nv + wf.nh * v + h] = state[v] * np.tanh(wf.Lt[h])
        return vector
    def get_forces(self, elocals, deriv_vectors):
        """Force vector F_k = <E_loc O_k*> - <E_loc><O_k*> (Carleo eq. C6)."""
        emean = np.mean(elocals) # mean local energy
        omean = np.mean(deriv_vectors, axis=0) # mean derivative vector
        correlator = np.mean([i[0] * np.conj(i[1]) for i in zip(elocals, deriv_vectors)], axis=0)
        # pair the local energies with Ovecs and then calculate mean
        return correlator - emean * np.conj(omean)
    def cov_operator(self, vec, deriv_vectors, step): # Callable function for evaluating S*v
        tvec = np.dot(deriv_vectors, vec) # vector of t-values
        term1 = np.dot(deriv_vectors.T.conj(), tvec) / deriv_vectors.shape[0]
        term2 = np.mean(deriv_vectors.conj(), axis=0) * np.mean(tvec)
        # Diagonal regularization decaying geometrically with the step,
        # floored at reg_list[2].
        reg = max(self.reg_list[0] * self.reg_list[1] ** step, self.reg_list[2]) * vec
        return term1 - term2 + reg
    def apply_update(self, updates, wf):
        # Update layout matches get_deriv_vector: [visible biases | hidden
        # biases | flattened weights].
        wf.a += updates[0:wf.nv]
        wf.b += updates[wf.nv:wf.nh + wf.nv]
        wf.W += np.reshape(updates[wf.nv + wf.nh:], wf.W.shape)
    def get_nvar(self, wf):
        # Total number of variational parameters of the RBM.
        return wf.nh + wf.nv + wf.nh * wf.nv
class TrainerTI(Trainer):
    def __init__(self, h, reg_list=(100, 0.9, 1e-4), cores=1):
        """Trainer for a translation-invariant RBM; defers to Trainer
        (with its default magnetization flag)."""
        Trainer.__init__(self, h, reg_list=reg_list, cores=cores)
    def apply_update(self, updates, wf):
        # Translation-invariant layout: [0] shared visible bias,
        # [1:alpha+1] reduced hidden biases, remainder the reduced weight
        # filters (reshaped to Wreduced's shape).
        wf.a += updates[0]
        wf.breduced += updates[1:wf.alpha + 1]
        wf.Wreduced += updates[wf.alpha + 1:].reshape(wf.Wreduced.shape)
def get_deriv_vector(self, state, wf):
# The derivative vector is a vector which contains the following elements in one column:
# First: derivative of psi(S) w.r.t. visible unit biases (1 of them)
# Second: the hidden unit biases (wf.alpha of them)
# Third: The weights (wf.Nh * wf.alpha)
# See Carleo C3-5 for formulas
vector = | np.zeros(1 + wf.alpha + wf.nv * wf.alpha, dtype=complex) | numpy.zeros |
from numpy import full, ones
from optimix import Function, Scalar
class OffsetMean(Function):
r"""
Offset mean function, θ⋅𝟏.
It represents a mean vector 𝐦 = θ⋅𝟏 of size n. The offset is given by the parameter
θ.
Example
-------
.. doctest::
>>> from glimix_core.mean import OffsetMean
>>>
>>> mean = OffsetMean(3)
>>> mean.offset = 2.0
>>> print(mean.value())
[2. 2. 2.]
>>> print(mean.gradient())
{'offset': array([1., 1., 1.])}
>>> mean.name = "𝐦"
>>> print(mean)
OffsetMean(): 𝐦
offset: 2.0
"""
    def __init__(self, n):
        """
        Constructor.
        Parameters
        ----------
        n : int
            Size of the 𝟏 array.
        """
        # θ, the single scalar parameter being optimized.
        self._offset = Scalar(0.0)
        # Box constraint on θ used by the optimizer.
        self._offset.bounds = (-200.0, +200)
        self._n = n
        # Register θ with the optimix Function machinery under the name "offset".
        Function.__init__(self, "OffsetMean", offset=self._offset)
    def fix_offset(self):
        """
        Prevent θ update during optimization.
        """
        # _fix is presumably inherited from the optimix Function base class.
        self._fix("offset")
    def unfix_offset(self):
        """
        Enable θ update during optimization.
        """
        # Counterpart of fix_offset; re-enables optimization of θ.
        self._unfix("offset")
    def value(self):
        """
        Offset mean.
        Returns
        -------
        𝐦 : (n,) ndarray
            θ⋅𝟏.
        """
        # Broadcast the scalar θ across an n-vector.
        return full(self._n, self._offset.value)
"""
This module provides an abstract base class for a callback and a default
implementation.
If you want to store the data in a way that is different from the
functionality provided by the default callback, you can write your own
extension of callback. For example, you can easily implement a callback
that stores the data in e.g. a NoSQL file.
The only method to implement is the __call__ magic method. To use logging of
progress, always call super.
"""
import abc
import csv
import os
import shutil
import numpy as np
import pandas as pd
from .parameters import (CategoricalParameter, IntegerParameter,
BooleanParameter)
from ..util import ema_exceptions, get_module_logger
#
# Created on 22 Jan 2013
#
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
#
__all__ = ['AbstractCallback',
'DefaultCallback',
'FileBasedCallback']
_logger = get_module_logger(__name__)
class AbstractCallback(object):
    """
    Base class from which concrete callbacks can be derived.

    A callback is responsible for storing the results of experiment runs.

    Parameters
    ----------
    uncertainties : list
        the parameters over which the experiments are being run
    outcomes : list
        the outcomes of interest
    levers : list
        the policy levers
    nr_experiments : int
        the total number of experiments to be executed
    reporting_interval : int, optional
        number of completed experiments between two progress log
        messages; derived from reporting_frequency when not given
    reporting_frequency : int, optional
        the total number of progress log messages over the whole run

    Attributes
    ----------
    i : int
        counter of how many experiments have been saved
    reporting_interval : int
        number of completed experiments between two progress log messages
    """
    __metaclass__ = abc.ABCMeta
    i = 0

    def __init__(self, uncertainties, outcomes, levers,
                 nr_experiments, reporting_interval=None,
                 reporting_frequency=10):
        if reporting_interval is None:
            # Spread roughly `reporting_frequency` progress messages over the
            # run, but never log less often than once per experiment.
            per_message = int(round(nr_experiments / reporting_frequency))
            reporting_interval = max(1, per_message)
        self.reporting_interval = reporting_interval

    @abc.abstractmethod
    def __call__(self, experiment, outcomes):
        """
        Store the results of a single run.

        This base implementation only counts completed runs and logs
        progress. Subclasses must override it; call super to keep the
        logging behavior.

        Parameters
        ----------
        experiment: Experiment instance
        outcomes: dict
            the outcomes dict
        """
        #
        # TODO:: https://github.com/alexanderkuk/log-progress
        # can we detect whether we are running within Jupyter?
        # yes:
        # https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
        self.i += 1
        progress = str(self.i) + " cases completed"
        _logger.debug(progress)
        if not self.i % self.reporting_interval:
            _logger.info(progress)

    @abc.abstractmethod
    def get_results(self):
        """
        Return the collected results; called after all experiments have
        been completed. Any extension of AbstractCallback needs to
        implement this method.
        """
class DefaultCallback(AbstractCallback):
"""
default callback system
callback can be used in perform_experiments as a means for
specifying the way in which the results should be handled. If no
callback is specified, this default implementation is used. This
one can be overwritten or replaced with a callback of your own
design. For example if you prefer to store the result in a database
or write them to a text file
"""
i = 0
cases = None
results = {}
shape_error_msg = "can only save up to 2d arrays, this array is {}d"
constraint_error_msg = ('can only save 1d arrays for constraint, '
'this array is {}d')
    def __init__(self, uncs, levers, outcomes, nr_experiments,
                 reporting_interval=100, reporting_frequency=10):
        """
        Set up storage for cases (inputs) and results (outcomes).

        Parameters
        ----------
        uncs : list
            a list of the parameters over which the experiments
            are being run.
        levers : list
            a list of the policy levers
        outcomes : list
            a list of outcomes
        nr_experiments : int
            the total number of experiments to be executed
        reporting_interval : int, optional
            the interval between progress logs
        reporting_frequency: int, optional
            the total number of progress logs
        """
        # NOTE(review): the argument order here (uncs, levers, outcomes) does
        # not match AbstractCallback.__init__(uncertainties, outcomes, levers).
        # Harmless today because the base class ignores those arguments, but
        # confirm before relying on them there.
        super(DefaultCallback, self).__init__(uncs, levers, outcomes,
                                              nr_experiments,
                                              reporting_interval,
                                              reporting_frequency)
        self.i = 0
        self.cases = None
        self.results = {}
        self.outcomes = [outcome.name for outcome in outcomes]
        # determine data types of parameters
        columns = []
        dtypes = []
        self.parameters = []
        for parameter in uncs + levers:
            name = parameter.name
            self.parameters.append(name)
            # Default to float; narrow for known parameter classes below.
            dataType = 'float'
            # NOTE(review): the order of these isinstance checks is
            # behavior-sensitive if the parameter classes subclass one
            # another (e.g. Boolean vs Integer) — confirm against the
            # parameters module before reordering.
            if isinstance(parameter, CategoricalParameter):
                dataType = 'object'
            elif isinstance(parameter, BooleanParameter):
                dataType = 'bool'
            elif isinstance(parameter, IntegerParameter):
                dataType = 'int'
            columns.append(name)
            dtypes.append(dataType)
        # bookkeeping columns identifying each experiment
        for name in ['scenario', 'policy', 'model']:
            columns.append(name)
            dtypes.append('object')
        # pre-allocate one row per experiment with typed empty columns
        df = pd.DataFrame(index=np.arange(nr_experiments))
        for name, dtype in zip(columns, dtypes):
            df[name] = pd.Series(dtype=dtype)
        self.cases = df
        self.nr_experiments = nr_experiments
        # pre-allocate nan-filled arrays for outcomes whose shape is known
        # up front; outcomes with shape None are allocated lazily on first
        # store (see _store_outcomes)
        for outcome in outcomes:
            shape = outcome.shape
            if shape is not None:
                shape = (nr_experiments,) + shape
                data = np.empty(shape)
                data[:] = np.nan
                self.results[outcome.name] = data
def _store_case(self, experiment):
scenario = experiment.scenario
policy = experiment.policy
index = experiment.experiment_id
self.cases.at[index, 'scenario'] = scenario.name
self.cases.at[index, 'policy'] = policy.name
self.cases.at[index, 'model'] = experiment.model_name
for k, v in scenario.items():
self.cases.at[index, k] = v
for k, v in policy.items():
self.cases.at[index, k] = v
def _store_outcomes(self, case_id, outcomes):
for outcome in self.outcomes:
_logger.debug("storing {}".format(outcome))
try:
outcome_res = outcomes[outcome]
except KeyError:
message = "%s not specified as outcome in msi" % outcome
_logger.debug(message)
else:
try:
self.results[outcome][case_id,] = outcome_res
except KeyError:
a = np.asarray(outcome_res, dtype=float)
shape = a.shape
if len(shape) > 2:
message = self.shape_error_msg.format(len(shape))
raise ema_exceptions.EMAError(message)
shape = list(shape)
shape.insert(0, self.nr_experiments)
self.results[outcome] = | np.empty(shape, dtype=a.dtype) | numpy.empty |
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest only added in 3.4 self.subTest()
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
class TestAdd(unittest.TestCase):
    """Unit tests for the iaa.Add augmenter (adds a scalar value to images)."""
    def setUp(self):
        """Reseed the library RNG so every test is deterministic."""
        reseed()
    def test___init___bad_datatypes(self):
        # test exceptions for wrong parameter types
        got_exception = False
        try:
            _ = iaa.Add(value="test")
        except Exception:
            got_exception = True
        assert got_exception
        got_exception = False
        try:
            _ = iaa.Add(value=1, per_channel="test")
        except Exception:
            got_exception = True
        assert got_exception
    def test_add_zero(self):
        # no add, shouldnt change anything
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Add(value=0)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
        assert observed.shape == (1, 3, 3, 1)
        observed = aug.augment_images(images_list)
        expected = images_list
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = images_list
        assert array_equal_lists(observed, expected)
    def test_add_one(self):
        # add > 0
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Add(value=1)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images + 1
        assert np.array_equal(observed, expected)
        assert observed.shape == (1, 3, 3, 1)
        observed = aug.augment_images(images_list)
        expected = [images_list[0] + 1]
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images + 1
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = [images_list[0] + 1]
        assert array_equal_lists(observed, expected)
    def test_minus_one(self):
        # add < 0
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Add(value=-1)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images - 1
        assert np.array_equal(observed, expected)
        observed = aug.augment_images(images_list)
        expected = [images_list[0] - 1]
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images - 1
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = [images_list[0] - 1]
        assert array_equal_lists(observed, expected)
    def test_uint8_every_possible_value(self):
        # uint8, every possible addition for base value 127
        for value_type in [float, int]:
            for per_channel in [False, True]:
                for value in np.arange(-255, 255+1):
                    aug = iaa.Add(value=value_type(value), per_channel=per_channel)
                    # expected result saturates at the uint8 value range
                    expected = np.clip(127 + value_type(value), 0, 255)
                    img = np.full((1, 1), 127, dtype=np.uint8)
                    img_aug = aug.augment_image(img)
                    assert img_aug.item(0) == expected
                    img = np.full((1, 1, 3), 127, dtype=np.uint8)
                    img_aug = aug.augment_image(img)
                    assert np.all(img_aug == expected)
    def test_add_floats(self):
        # specific tests with floats
        # 0.75 is expected to round up to +1, 0.45 to round down to +0
        aug = iaa.Add(value=0.75)
        img = np.full((1, 1), 1, dtype=np.uint8)
        img_aug = aug.augment_image(img)
        assert img_aug.item(0) == 2
        img = np.full((1, 1), 1, dtype=np.uint16)
        img_aug = aug.augment_image(img)
        assert img_aug.item(0) == 2
        aug = iaa.Add(value=0.45)
        img = np.full((1, 1), 1, dtype=np.uint8)
        img_aug = aug.augment_image(img)
        assert img_aug.item(0) == 1
        img = np.full((1, 1), 1, dtype=np.uint16)
        img_aug = aug.augment_image(img)
        assert img_aug.item(0) == 1
    def test_stochastic_parameters_as_value(self):
        # test other parameters
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
        observed = aug.augment_images(images)
        assert 100 + 1 <= np.average(observed) <= 100 + 10
        aug = iaa.Add(value=iap.Uniform(1, 10))
        observed = aug.augment_images(images)
        assert 100 + 1 <= np.average(observed) <= 100 + 10
        aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
        observed = aug.augment_images(images)
        assert 100 - 3 <= np.average(observed) <= 100 + 3
        aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
        observed = aug.augment_images(images)
        assert 100 - 3 <= np.average(observed) <= 100 + 3
    def test_keypoints_dont_change(self):
        # keypoints shouldnt be changed
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
                                          ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
        aug = iaa.Add(value=1)
        aug_det = iaa.Add(value=1).to_deterministic()
        observed = aug.augment_keypoints(keypoints)
        expected = keypoints
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(keypoints)
        expected = keypoints
        assert keypoints_equal(observed, expected)
    def test_tuple_as_value(self):
        # varying values
        # a non-deterministic augmenter should produce changing outputs,
        # a deterministic copy must always produce the same output
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        aug = iaa.Add(value=(0, 10))
        aug_det = aug.to_deterministic()
        last_aug = None
        last_aug_det = None
        nb_changed_aug = 0
        nb_changed_aug_det = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug = observed_aug
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug = observed_aug
                last_aug_det = observed_aug_det
        assert nb_changed_aug >= int(nb_iterations * 0.7)
        assert nb_changed_aug_det == 0
    def test_per_channel(self):
        # test channelwise
        aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
        observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
        uq = np.unique(observed)
        assert observed.shape == (1, 1, 100)
        assert 0 in uq
        assert 1 in uq
        assert len(uq) == 2
    def test_per_channel_with_probability(self):
        # test channelwise with probability
        # per_channel=0.5 should apply channelwise sampling in roughly half
        # of the runs
        aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
        seen = [0, 0]
        for _ in sm.xrange(400):
            observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
            assert observed.shape == (1, 1, 20)
            uq = np.unique(observed)
            per_channel = (len(uq) == 2)
            if per_channel:
                seen[0] += 1
            else:
                seen[1] += 1
        assert 150 < seen[0] < 250
        assert 150 < seen[1] < 250
    def test_zero_sized_axes(self):
        """Images with zero-sized axes must pass through without errors."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.Add(1)
                image_aug = aug(image=image)
                assert np.all(image_aug == 1)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == image.shape
    def test_unusual_channel_numbers(self):
        """Channel counts other than 1/3 must be handled correctly."""
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.Add(1)
                image_aug = aug(image=image)
                assert np.all(image_aug == 1)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == image.shape
    def test_get_parameters(self):
        # test get_parameters()
        aug = iaa.Add(value=1, per_channel=False)
        params = aug.get_parameters()
        assert isinstance(params[0], iap.Deterministic)
        assert isinstance(params[1], iap.Deterministic)
        assert params[0].value == 1
        assert params[1].value == 0
    def test_heatmaps(self):
        # test heatmaps (not affected by augmenter)
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        aug = iaa.Add(value=10)
        hm = ia.quokka_heatmap()
        hm_aug = aug.augment_heatmaps([hm])[0]
        assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
    def test_other_dtypes_bool(self):
        """For bool images, results must saturate to False/True."""
        image = np.zeros((3, 3), dtype=bool)
        aug = iaa.Add(value=1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 1)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.Add(value=1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 1)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.Add(value=-1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 0)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.Add(value=-2)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 0)
    def test_other_dtypes_uint_int(self):
        """Integer dtypes must keep their dtype and saturate at the range limits."""
        for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
            min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
            image = np.full((3, 3), min_value, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 21)
            image = np.full((3, 3), max_value - 2, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value - 1)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.Add(2)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-9)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-10)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value)
            for _ in sm.xrange(10):
                image = np.full((1, 1, 3), 20, dtype=dtype)
                aug = iaa.Add(iap.Uniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) == 1
                image = np.full((1, 1, 100), 20, dtype=dtype)
                aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
                image = np.full((1, 1, 3), 20, dtype=dtype)
                aug = iaa.Add(iap.DiscreteUniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) == 1
                image = np.full((1, 1, 100), 20, dtype=dtype)
                aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
    def test_other_dtypes_float(self):
        # float
        for dtype in [np.float16, np.float32]:
            min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
            # float16 has much lower precision, so compare more loosely
            if dtype == np.float16:
                atol = 1e-3 * max_value
            else:
                atol = 1e-9 * max_value
            _allclose = functools.partial(np.allclose, atol=atol, rtol=0)
            image = np.full((3, 3), min_value, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 21)
            image = np.full((3, 3), max_value - 2, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value - 1)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.Add(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.Add(2)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-9)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-10)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.Add(-11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value)
            for _ in sm.xrange(10):
                image = np.full((50, 1, 3), 0, dtype=dtype)
                aug = iaa.Add(iap.Uniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
                assert np.allclose(image_aug[..., 0], image_aug[..., 1])
                image = np.full((1, 1, 100), 0, dtype=dtype)
                aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
                image = np.full((50, 1, 3), 0, dtype=dtype)
                aug = iaa.Add(iap.DiscreteUniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
                assert np.allclose(image_aug[..., 0], image_aug[..., 1])
                image = np.full((1, 1, 100), 0, dtype=dtype)
                aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
    def test_pickleable(self):
        """The augmenter must survive a pickle round trip unchanged."""
        aug = iaa.Add((0, 50), per_channel=True, random_state=1)
        runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
    def setUp(self):
        """Reseed the library RNG so every test is deterministic."""
        reseed()
    def test___init___bad_datatypes(self):
        """Invalid constructor arguments must raise an exception."""
        # test exceptions for wrong parameter types
        got_exception = False
        try:
            _aug = iaa.AddElementwise(value="test")
        except Exception:
            got_exception = True
        assert got_exception
        got_exception = False
        try:
            _aug = iaa.AddElementwise(value=1, per_channel="test")
        except Exception:
            got_exception = True
        assert got_exception
    def test_add_zero(self):
        """Adding 0 must leave images unchanged (array and list inputs)."""
        # no add, shouldnt change anything
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.AddElementwise(value=0)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
        assert observed.shape == (1, 3, 3, 1)
        observed = aug.augment_images(images_list)
        expected = images_list
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = images_list
        assert array_equal_lists(observed, expected)
    def test_add_one(self):
        """Adding +1 must increase every pixel value by exactly 1."""
        # add > 0
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.AddElementwise(value=1)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images + 1
        assert np.array_equal(observed, expected)
        assert observed.shape == (1, 3, 3, 1)
        observed = aug.augment_images(images_list)
        expected = [images_list[0] + 1]
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images + 1
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = [images_list[0] + 1]
        assert array_equal_lists(observed, expected)
    def test_add_minus_one(self):
        """Adding -1 must decrease every pixel value by exactly 1."""
        # add < 0
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.AddElementwise(value=-1)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        expected = images - 1
        assert np.array_equal(observed, expected)
        observed = aug.augment_images(images_list)
        expected = [images_list[0] - 1]
        assert array_equal_lists(observed, expected)
        observed = aug_det.augment_images(images)
        expected = images - 1
        assert np.array_equal(observed, expected)
        observed = aug_det.augment_images(images_list)
        expected = [images_list[0] - 1]
        assert array_equal_lists(observed, expected)
    def test_uint8_every_possible_value(self):
        """Every addend in [-255, 255] must saturate correctly on uint8."""
        # uint8, every possible addition for base value 127
        for value_type in [int]:
            for per_channel in [False, True]:
                for value in np.arange(-255, 255+1):
                    aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
                    expected = np.clip(127 + value_type(value), 0, 255)
                    img = np.full((1, 1), 127, dtype=np.uint8)
                    img_aug = aug.augment_image(img)
                    assert img_aug.item(0) == expected
                    img = np.full((1, 1, 3), 127, dtype=np.uint8)
                    img_aug = aug.augment_image(img)
                    assert np.all(img_aug == expected)
    def test_stochastic_parameters_as_value(self):
        """Stochastic parameters as value must keep results inside their range."""
        # test other parameters
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
        observed = aug.augment_images(images)
        assert np.min(observed) >= 100 + 1
        assert np.max(observed) <= 100 + 10
        aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
        observed = aug.augment_images(images)
        assert np.min(observed) >= 100 + 1
        assert np.max(observed) <= 100 + 10
        aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
        observed = aug.augment_images(images)
        assert np.min(observed) >= 100 - 3
        assert np.max(observed) <= 100 + 3
        aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
        observed = aug.augment_images(images)
        assert np.min(observed) >= 100 - 3
        assert np.max(observed) <= 100 + 3
    def test_keypoints_dont_change(self):
        """Keypoints must not be moved by a purely intensity-based augmenter."""
        # keypoints shouldnt be changed
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
                                          ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
        aug = iaa.AddElementwise(value=1)
        aug_det = iaa.AddElementwise(value=1).to_deterministic()
        observed = aug.augment_keypoints(keypoints)
        expected = keypoints
        assert keypoints_equal(observed, expected)
        observed = aug_det.augment_keypoints(keypoints)
        expected = keypoints
        assert keypoints_equal(observed, expected)
    def test_tuple_as_value(self):
        """A (min, max) tuple must vary between calls; a deterministic copy must not."""
        # varying values
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        aug = iaa.AddElementwise(value=(0, 10))
        aug_det = aug.to_deterministic()
        last_aug = None
        last_aug_det = None
        nb_changed_aug = 0
        nb_changed_aug_det = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug = observed_aug
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug = observed_aug
                last_aug_det = observed_aug_det
        assert nb_changed_aug >= int(nb_iterations * 0.7)
        assert nb_changed_aug_det == 0
    def test_samples_change_by_spatial_location(self):
        """Elementwise sampling must produce different addends per pixel."""
        # values should change between pixels
        base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
        images = np.array([base_img])
        aug = iaa.AddElementwise(value=(-50, 50))
        nb_same = 0
        nb_different = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_flat = observed_aug.flatten()
            last = None
            for j in sm.xrange(observed_aug_flat.size):
                if last is not None:
                    v = observed_aug_flat[j]
                    # tolerance window guards against float representation noise
                    if v - 0.0001 <= last <= v + 0.0001:
                        nb_same += 1
                    else:
                        nb_different += 1
                last = observed_aug_flat[j]
        assert nb_different > 0.9 * (nb_different + nb_same)
    def test_per_channel(self):
        """per_channel=True must sample independently per channel."""
        # test channelwise
        aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
        observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
        sums = np.sum(observed, axis=2)
        values = np.unique(sums)
        assert all([(value in values) for value in [0, 1, 2, 3]])
    def test_per_channel_with_probability(self):
        """per_channel=0.5 must apply channelwise sampling in roughly half of runs."""
        # test channelwise with probability
        aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
        seen = [0, 0]
        for _ in sm.xrange(400):
            observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
            sums = np.sum(observed, axis=2)
            values = np.unique(sums)
            all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
            if all_values_found:
                seen[0] += 1
            else:
                seen[1] += 1
        assert 150 < seen[0] < 250
        assert 150 < seen[1] < 250
    def test_zero_sized_axes(self):
        """Images with zero-sized axes must pass through without errors."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.AddElementwise(1)
                image_aug = aug(image=image)
                assert np.all(image_aug == 1)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == image.shape
    def test_unusual_channel_numbers(self):
        """Channel counts other than 1/3 must be handled correctly."""
        shapes = [
            (1, 1, 4),
            (1, 1, 5),
            (1, 1, 512),
            (1, 1, 513)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.zeros(shape, dtype=np.uint8)
                aug = iaa.AddElementwise(1)
                image_aug = aug(image=image)
                assert np.all(image_aug == 1)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == image.shape
    def test_get_parameters(self):
        """get_parameters() must expose the value and per_channel settings."""
        # test get_parameters()
        aug = iaa.AddElementwise(value=1, per_channel=False)
        params = aug.get_parameters()
        assert isinstance(params[0], iap.Deterministic)
        assert isinstance(params[1], iap.Deterministic)
        assert params[0].value == 1
        assert params[1].value == 0
    def test_heatmaps_dont_change(self):
        """Heatmaps must not be modified by this intensity augmenter."""
        # test heatmaps (not affected by augmenter)
        aug = iaa.AddElementwise(value=10)
        hm = ia.quokka_heatmap()
        hm_aug = aug.augment_heatmaps([hm])[0]
        assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
    def test_other_dtypes_bool(self):
        """For bool images, results must saturate to False/True."""
        # bool
        image = np.zeros((3, 3), dtype=bool)
        aug = iaa.AddElementwise(value=1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 1)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.AddElementwise(value=1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 1)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.AddElementwise(value=-1)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 0)
        image = np.full((3, 3), True, dtype=bool)
        aug = iaa.AddElementwise(value=-2)
        image_aug = aug.augment_image(image)
        assert image_aug.dtype.type == np.bool_
        assert np.all(image_aug == 0)
    def test_other_dtypes_uint_int(self):
        """Integer dtypes must keep their dtype and saturate at the range limits."""
        # uint, int
        for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
            min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
            image = np.full((3, 3), min_value, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 21)
            image = np.full((3, 3), max_value - 2, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value - 1)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value)
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.AddElementwise(2)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == max_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-9)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value + 1)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-10)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value)
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert np.all(image_aug == min_value)
            for _ in sm.xrange(10):
                image = np.full((5, 5, 3), 20, dtype=dtype)
                aug = iaa.AddElementwise(iap.Uniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
                assert np.all(image_aug[..., 0] == image_aug[..., 1])
                image = np.full((1, 1, 100), 20, dtype=dtype)
                aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
                image = np.full((5, 5, 3), 20, dtype=dtype)
                aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
                assert np.all(image_aug[..., 0] == image_aug[..., 1])
                image = np.full((1, 1, 100), 20, dtype=dtype)
                aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
                assert len(np.unique(image_aug)) > 1
    def test_other_dtypes_float(self):
        """Check AddElementwise on float16/float32 images near the dtype limits.

        Verifies that constant additions close to the minimum/maximum of the
        value range produce the expected (clipped) results, and that random
        per-pixel additions stay in range and vary per pixel/channel.

        NOTE(review): the random sections draw from the global RNG in a fixed
        statement order; reordering statements may change the sampled values
        and flip the probabilistic assertions.
        """
        # float
        for dtype in [np.float16, np.float32]:
            min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
            # float16 has much lower precision, so compare with a coarser
            # absolute tolerance (scaled by the dtype's max value)
            if dtype == np.float16:
                atol = 1e-3 * max_value
            else:
                atol = 1e-9 * max_value
            _allclose = functools.partial(np.allclose, atol=atol, rtol=0)

            # additions at the bottom of the value range
            image = np.full((3, 3), min_value, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 1)

            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 21)

            # additions at the top of the value range
            image = np.full((3, 3), max_value - 2, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value - 1)

            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.AddElementwise(1)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value)

            # addition that would exceed max_value must stay at max_value
            image = np.full((3, 3), max_value - 1, dtype=dtype)
            aug = iaa.AddElementwise(2)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, max_value)

            # subtractions towards/below min_value must stop at min_value
            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-9)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value + 1)

            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-10)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value)

            image = np.full((3, 3), min_value + 10, dtype=dtype)
            aug = iaa.AddElementwise(-11)
            image_aug = aug.augment_image(image)
            assert image_aug.dtype.type == dtype
            assert _allclose(image_aug, min_value)

            # random per-pixel offsets: without per_channel all channels get
            # the same value per pixel; with per_channel they must differ
            for _ in sm.xrange(10):
                image = np.full((50, 1, 3), 0, dtype=dtype)
                aug = iaa.AddElementwise(iap.Uniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
                assert np.allclose(image_aug[..., 0], image_aug[..., 1])

                image = np.full((1, 1, 100), 0, dtype=dtype)
                aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])

                image = np.full((50, 1, 3), 0, dtype=dtype)
                aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
                assert np.allclose(image_aug[..., 0], image_aug[..., 1])

                image = np.full((1, 1, 100), 0, dtype=dtype)
                aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
                image_aug = aug.augment_image(image)
                assert image_aug.dtype.type == dtype
                assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
                assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class AdditiveGaussianNoise(unittest.TestCase):
    """Tests for ``iaa.AdditiveGaussianNoise``.

    NOTE(review): unlike the sibling classes (e.g. ``TestDropout``) this
    class name lacks the ``Test`` prefix. unittest still collects it because
    it subclasses ``TestCase``, but the naming is inconsistent; renaming is
    avoided here to keep the public interface unchanged.

    Many tests below assert statistical properties over repeated draws from
    the global RNG; statement order matters for reproducibility.
    """

    def setUp(self):
        # re-seed the global RNGs so every test starts deterministically
        reseed()

    def test_loc_zero_scale_zero(self):
        """loc=0, scale=0 must be an exact no-op on images."""
        # no noise, shouldn't change anything
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        images = np.array([base_img])
        aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)

    def test_loc_zero_scale_nonzero(self):
        """Zero-centered noise changes images but leaves keypoints untouched."""
        # zero-centered noise
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        images = np.array([base_img])
        images_list = [base_img]
        keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
                                          ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
        aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        assert not np.array_equal(observed, images)
        observed = aug_det.augment_images(images)
        assert not np.array_equal(observed, images)
        observed = aug.augment_images(images_list)
        assert not array_equal_lists(observed, images_list)
        observed = aug_det.augment_images(images_list)
        assert not array_equal_lists(observed, images_list)
        # keypoints must never be altered by pixel noise
        observed = aug.augment_keypoints(keypoints)
        assert keypoints_equal(observed, keypoints)
        observed = aug_det.augment_keypoints(keypoints)
        assert keypoints_equal(observed, keypoints)

    def test_std_dev_of_added_noise_matches_scale(self):
        """Empirical std of sampled pixel values roughly matches ``scale``."""
        # std correct?
        # NOTE(review): base_img is unused in this test
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
        images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
        nb_iterations = 1000
        values = []
        for i in sm.xrange(nb_iterations):
            images_aug = aug.augment_images(images)
            values.append(images_aug[0, 0, 0, 0])
        values = np.array(values)
        # probabilistic: relies on at least one of the 1000 samples being
        # clipped down to 0 (scale is ~51 around a mean of 128)
        assert np.min(values) == 0
        assert 0.1 < np.std(values) / 255.0 < 0.4

    def test_nonzero_loc(self):
        """Mean of the added noise follows a non-zero ``loc``."""
        # non-zero loc
        # NOTE(review): base_img is unused in this test
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
        images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
        nb_iterations = 1000
        values = []
        for i in sm.xrange(nb_iterations):
            images_aug = aug.augment_images(images)
            values.append(images_aug[0, 0, 0, 0] - 128)
        values = np.array(values)
        assert 54 < np.average(values) < 74  # loc=0.25 should be around 255*0.25=64 average

    def test_tuple_as_loc(self):
        """A tuple ``loc`` re-samples per call; a deterministic copy does not."""
        # varying locs
        # NOTE(review): base_img is unused in this test
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
        aug_det = aug.to_deterministic()
        images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
        last_aug = None
        last_aug_det = None
        nb_changed_aug = 0
        nb_changed_aug_det = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug = observed_aug
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug = observed_aug
                last_aug_det = observed_aug_det
        # non-deterministic augmenter changes almost every iteration,
        # the deterministic copy never does
        assert nb_changed_aug >= int(nb_iterations * 0.95)
        assert nb_changed_aug_det == 0

    def test_stochastic_parameter_as_loc(self):
        """A stochastic ``loc`` produces both of its values about equally often."""
        # varying locs by stochastic param
        # NOTE(review): base_img is unused in this test
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
        images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
        seen = [0, 0]
        for i in sm.xrange(200):
            observed = aug.augment_images(images)
            mean = np.mean(observed)
            diff_m20 = abs(mean - (128-20))
            diff_p20 = abs(mean - (128+20))
            if diff_m20 <= 1:
                seen[0] += 1
            elif diff_p20 <= 1:
                seen[1] += 1
            else:
                # mean must always be near one of the two loc choices
                assert False
        assert 75 < seen[0] < 125
        assert 75 < seen[1] < 125

    def test_tuple_as_scale(self):
        """A tuple ``scale`` re-samples per call; a deterministic copy does not."""
        # varying stds
        # NOTE(review): base_img is unused in this test
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
        aug_det = aug.to_deterministic()
        images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
        last_aug = None
        last_aug_det = None
        nb_changed_aug = 0
        nb_changed_aug_det = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug = observed_aug
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug = observed_aug
                last_aug_det = observed_aug_det
        assert nb_changed_aug >= int(nb_iterations * 0.95)
        assert nb_changed_aug_det == 0

    def test_stochastic_parameter_as_scale(self):
        """A stochastic ``scale`` yields noise whose std matches one choice."""
        # varying stds by stochastic param
        # NOTE(review): base_img is unused in this test
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
        images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
        seen = [0, 0, 0]
        for i in sm.xrange(200):
            observed = aug.augment_images(images)
            std = np.std(observed.astype(np.int32) - 128)
            diff_1 = abs(std - 1)
            diff_20 = abs(std - 20)
            if diff_1 <= 2:
                seen[0] += 1
            elif diff_20 <= 5:
                seen[1] += 1
            else:
                seen[2] += 1
        assert seen[2] <= 5
        assert 75 < seen[0] < 125
        assert 75 < seen[1] < 125

    def test___init___bad_datatypes(self):
        """Constructor must reject string values for ``loc`` and ``scale``."""
        # test exceptions for wrong parameter types
        got_exception = False
        try:
            _ = iaa.AdditiveGaussianNoise(loc="test")
        except Exception:
            got_exception = True
        assert got_exception
        got_exception = False
        try:
            _ = iaa.AdditiveGaussianNoise(scale="test")
        except Exception:
            got_exception = True
        assert got_exception

    def test_heatmaps_dont_change(self):
        """Heatmaps must pass through the augmenter unchanged."""
        # test heatmaps (not affected by augmenter)
        # NOTE(review): base_img is unused in this test
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
        aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
        hm = ia.quokka_heatmap()
        hm_aug = aug.augment_heatmaps([hm])[0]
        assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)

    def test_pickleable(self):
        """A pickled+unpickled augmenter must produce identical output."""
        aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
                                        random_state=1)
        runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
    """Tests for ``iaa.Dropout`` (per-pixel dropout).

    Several tests assert dropout rates statistically over repeated draws
    from the global RNG; statement order matters for reproducibility.
    """

    def setUp(self):
        # re-seed the global RNGs so every test starts deterministically
        reseed()

    def test_p_is_zero(self):
        """p=0 is a no-op; p=1.0 zeros every pixel.

        NOTE(review): despite the name, this test also covers p=1.0.
        """
        # no dropout, shouldn't change anything
        base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
        images = np.array([base_img])
        images_list = [base_img]
        aug = iaa.Dropout(p=0)
        observed = aug.augment_images(images)
        expected = images
        assert np.array_equal(observed, expected)
        observed = aug.augment_images(images_list)
        expected = images_list
        assert array_equal_lists(observed, expected)
        # 100% dropout, should drop everything
        aug = iaa.Dropout(p=1.0)
        observed = aug.augment_images(images)
        expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
        assert np.array_equal(observed, expected)
        observed = aug.augment_images(images_list)
        expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
        assert array_equal_lists(observed, expected)

    def test_p_is_50_percent(self):
        """p=0.5 drops roughly half of the pixels and never moves keypoints."""
        # 50% dropout
        base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
        images = np.array([base_img])
        images_list = [base_img]
        keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
                                          ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
        aug = iaa.Dropout(p=0.5)
        aug_det = aug.to_deterministic()
        observed = aug.augment_images(images)
        assert not np.array_equal(observed, images)
        percent_nonzero = len(observed.flatten().nonzero()[0]) \
                          / (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
        assert 0.35 <= (1 - percent_nonzero) <= 0.65
        observed = aug_det.augment_images(images)
        assert not np.array_equal(observed, images)
        percent_nonzero = len(observed.flatten().nonzero()[0]) \
                          / (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
        assert 0.35 <= (1 - percent_nonzero) <= 0.65
        observed = aug.augment_images(images_list)
        assert not array_equal_lists(observed, images_list)
        percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
                          / (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
        assert 0.35 <= (1 - percent_nonzero) <= 0.65
        observed = aug_det.augment_images(images_list)
        assert not array_equal_lists(observed, images_list)
        percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
                          / (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
        assert 0.35 <= (1 - percent_nonzero) <= 0.65
        # keypoints must never be altered by pixel dropout
        observed = aug.augment_keypoints(keypoints)
        assert keypoints_equal(observed, keypoints)
        observed = aug_det.augment_keypoints(keypoints)
        assert keypoints_equal(observed, keypoints)

    def test_tuple_as_p(self):
        """A tuple ``p`` re-samples per call; a deterministic copy does not."""
        # varying p
        aug = iaa.Dropout(p=(0.0, 1.0))
        aug_det = aug.to_deterministic()
        images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
        last_aug = None
        last_aug_det = None
        nb_changed_aug = 0
        nb_changed_aug_det = 0
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            observed_aug_det = aug_det.augment_images(images)
            if i == 0:
                last_aug = observed_aug
                last_aug_det = observed_aug_det
            else:
                if not np.array_equal(observed_aug, last_aug):
                    nb_changed_aug += 1
                if not np.array_equal(observed_aug_det, last_aug_det):
                    nb_changed_aug_det += 1
                last_aug = observed_aug
                last_aug_det = observed_aug_det
        assert nb_changed_aug >= int(nb_iterations * 0.95)
        assert nb_changed_aug_det == 0

    def test_list_as_p(self):
        """A list ``p`` picks each listed dropout rate roughly uniformly."""
        aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
        images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
        nb_seen = [0, 0, 0, 0]
        nb_iterations = 1000
        for i in sm.xrange(nb_iterations):
            observed_aug = aug.augment_images(images)
            n_dropped = np.sum(observed_aug == 0)
            p_observed = n_dropped / observed_aug.size
            if 0 <= p_observed <= 0.01:
                nb_seen[0] += 1
            elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
                nb_seen[1] += 1
            elif 1.0-0.01 <= p_observed <= 1.0:
                nb_seen[2] += 1
            else:
                # nb_seen[3] counts observed rates matching none of the
                # three configured values
                nb_seen[3] += 1
        assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
        assert nb_seen[3] < 30

    def test_stochastic_parameter_as_p(self):
        """A stochastic ``p`` alternates between its configured dropout rates."""
        # varying p by stochastic parameter
        aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
        images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
        seen = [0, 0, 0]
        for i in sm.xrange(400):
            observed = aug.augment_images(images)
            p = np.mean(observed == 0)
            if 0.4 < p < 0.6:
                seen[0] += 1
            elif p < 0.1:
                seen[1] += 1
            else:
                seen[2] += 1
        assert seen[2] <= 10
        assert 150 < seen[0] < 250
        assert 150 < seen[1] < 250

    def test___init___bad_datatypes(self):
        """Constructor must reject a string value for ``p``."""
        # test exception for wrong parameter datatype
        got_exception = False
        try:
            _aug = iaa.Dropout(p="test")
        except Exception:
            got_exception = True
        assert got_exception

    def test_heatmaps_dont_change(self):
        """Heatmaps must pass through the augmenter unchanged, even at p=1."""
        # test heatmaps (not affected by augmenter)
        aug = iaa.Dropout(p=1.0)
        hm = ia.quokka_heatmap()
        hm_aug = aug.augment_heatmaps([hm])[0]
        assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)

    def test_pickleable(self):
        """A pickled+unpickled augmenter must produce identical output."""
        aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
        runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
    """Tests for ``iaa.CoarseDropout`` (dropout of coarse rectangular areas).

    Several tests assert dropout rates statistically over repeated draws
    from the global RNG; statement order matters for reproducibility.
    """

    def setUp(self):
        # re-seed the global RNGs so every test starts deterministically
        reseed()

    def test_p_is_zero(self):
        """p=0 must leave the image unchanged."""
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
        aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
        observed = aug.augment_image(base_img)
        expected = base_img
        assert np.array_equal(observed, expected)

    def test_p_is_one(self):
        """p=1 must zero out the whole image."""
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
        aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
        observed = aug.augment_image(base_img)
        expected = np.zeros_like(base_img)
        assert np.array_equal(observed, expected)

    def test_p_is_50_percent(self):
        """p=0.5 with 1px cells drops roughly half of the pixels on average."""
        base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
        aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
        averages = []
        for _ in sm.xrange(50):
            observed = aug.augment_image(base_img)
            averages.append(np.average(observed))
        # each pixel is either kept (100) or dropped (0)
        assert all([v in [0, 100] for v in averages])
        assert 50 - 20 < np.average(averages) < 50 + 20
class TestDropout2d(unittest.TestCase):
    """Tests for ``iaa.Dropout2d`` (dropout of entire image channels).

    Covers constructor defaults, all augmentable types (images, heatmaps,
    segmentation maps, coordinate-based items), the ``nb_keep_channels``
    guarantee, degenerate shapes and non-uint8 dtypes.
    """

    def setUp(self):
        # re-seed the global RNGs so every test starts deterministically
        reseed()

    def test___init___defaults(self):
        """Default keep-probability is Binomial(1.0) and one channel is kept."""
        aug = iaa.Dropout2d(p=0)
        assert isinstance(aug.p, iap.Binomial)
        # p is the drop probability; internally stored as keep probability
        assert np.isclose(aug.p.p.value, 1.0)
        assert aug.nb_keep_channels == 1

    def test___init___p_is_float(self):
        """A float drop probability p is stored as keep probability 1-p."""
        aug = iaa.Dropout2d(p=0.7)
        assert isinstance(aug.p, iap.Binomial)
        assert np.isclose(aug.p.p.value, 0.3)
        assert aug.nb_keep_channels == 1

    def test___init___nb_keep_channels_is_int(self):
        """``nb_keep_channels`` is stored verbatim."""
        aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
        assert isinstance(aug.p, iap.Binomial)
        assert np.isclose(aug.p.p.value, 1.0)
        assert aug.nb_keep_channels == 2

    def test_no_images_in_batch(self):
        """A batch without images (heatmaps only) passes through unchanged."""
        aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
        heatmaps = np.float32([
            [0.0, 1.0],
            [0.0, 1.0]
        ])
        heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
        heatmaps_aug = aug(heatmaps=heatmaps)
        assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)

    def test_p_is_1(self):
        """p=1 with no kept channels zeros the whole image."""
        image = np.full((1, 2, 3), 255, dtype=np.uint8)
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
        image_aug = aug(image=image)
        assert image_aug.shape == image.shape
        assert image_aug.dtype.name == image.dtype.name
        assert np.sum(image_aug) == 0

    def test_p_is_1_heatmaps(self):
        """With every channel dropped, heatmaps are zeroed too."""
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
        arr = np.float32([
            [0.0, 1.0],
            [0.0, 1.0]
        ])
        hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
        heatmaps_aug = aug(heatmaps=hm)
        assert np.allclose(heatmaps_aug.arr_0to1, 0.0)

    def test_p_is_1_segmentation_maps(self):
        """With every channel dropped, segmentation maps are zeroed too."""
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
        arr = np.int32([
            [0, 1],
            [0, 1]
        ])
        segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
        segmaps_aug = aug(segmentation_maps=segmaps)
        assert np.allclose(segmaps_aug.arr, 0.0)

    def test_p_is_1_cbaois(self):
        """With every channel dropped, coordinate-based items are removed."""
        cbaois = [
            ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
            ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
                                    shape=(2, 2, 3)),
            ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
                               shape=(2, 2, 3)),
            ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
                                  shape=(2, 2, 3))
        ]
        cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
                       "line_strings"]
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
        for name, cbaoi in zip(cbaoi_names, cbaois):
            with self.subTest(datatype=name):
                cbaoi_aug = aug(**{name: cbaoi})
                assert cbaoi_aug.shape == (2, 2, 3)
                assert cbaoi_aug.items == []

    def test_p_is_1_heatmaps__keep_one_channel(self):
        """Keeping at least one channel leaves heatmaps unchanged."""
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
        arr = np.float32([
            [0.0, 1.0],
            [0.0, 1.0]
        ])
        hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
        heatmaps_aug = aug(heatmaps=hm)
        assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)

    def test_p_is_1_segmentation_maps__keep_one_channel(self):
        """Keeping at least one channel leaves segmentation maps unchanged."""
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
        arr = np.int32([
            [0, 1],
            [0, 1]
        ])
        segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
        segmaps_aug = aug(segmentation_maps=segmaps)
        assert np.allclose(segmaps_aug.arr, segmaps.arr)

    def test_p_is_1_cbaois__keep_one_channel(self):
        """Keeping at least one channel preserves coordinate-based items."""
        cbaois = [
            ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
            ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
                                    shape=(2, 2, 3)),
            ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
                               shape=(2, 2, 3)),
            ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
                                  shape=(2, 2, 3))
        ]
        cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
                       "line_strings"]
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
        for name, cbaoi in zip(cbaoi_names, cbaois):
            with self.subTest(datatype=name):
                cbaoi_aug = aug(**{name: cbaoi})
                assert cbaoi_aug.shape == (2, 2, 3)
                assert np.allclose(
                    cbaoi_aug.items[0].coords,
                    cbaoi.items[0].coords
                )

    def test_p_is_0(self):
        """p=0 leaves the image unchanged."""
        image = np.full((1, 2, 3), 255, dtype=np.uint8)
        aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
        image_aug = aug(image=image)
        assert image_aug.shape == image.shape
        assert image_aug.dtype.name == image.dtype.name
        assert np.array_equal(image_aug, image)

    def test_p_is_0_heatmaps(self):
        """p=0 leaves heatmaps unchanged."""
        aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
        arr = np.float32([
            [0.0, 1.0],
            [0.0, 1.0]
        ])
        hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
        heatmaps_aug = aug(heatmaps=hm)
        assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)

    def test_p_is_0_segmentation_maps(self):
        """p=0 leaves segmentation maps unchanged."""
        aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
        arr = np.int32([
            [0, 1],
            [0, 1]
        ])
        segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
        segmaps_aug = aug(segmentation_maps=segmaps)
        assert np.allclose(segmaps_aug.arr, segmaps.arr)

    def test_p_is_0_cbaois(self):
        """p=0 preserves coordinate-based items."""
        cbaois = [
            ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
            ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
                                    shape=(2, 2, 3)),
            ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
                               shape=(2, 2, 3)),
            ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
                                  shape=(2, 2, 3))
        ]
        cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
                       "line_strings"]
        aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
        for name, cbaoi in zip(cbaoi_names, cbaois):
            with self.subTest(datatype=name):
                cbaoi_aug = aug(**{name: cbaoi})
                assert cbaoi_aug.shape == (2, 2, 3)
                assert np.allclose(
                    cbaoi_aug.items[0].coords,
                    cbaoi.items[0].coords
                )

    def test_p_is_075(self):
        """p=0.75 drops roughly three quarters of 3000 channels."""
        image = np.full((1, 1, 3000), 255, dtype=np.uint8)
        aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
        image_aug = aug(image=image)
        nb_kept = np.sum(image_aug == 255)
        nb_dropped = image.shape[2] - nb_kept
        assert image_aug.shape == image.shape
        assert image_aug.dtype.name == image.dtype.name
        # probabilistic bound on the realized dropout rate
        assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)

    def test_force_nb_keep_channels(self):
        """Even at p=1, exactly ``nb_keep_channels`` survive, chosen randomly."""
        image = np.full((1, 1, 3), 255, dtype=np.uint8)
        images = np.array([image] * 1000)
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
        images_aug = aug(images=images)
        ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
        ids_kept_uq = np.unique(ids_kept)
        nb_kept = np.sum(images_aug == 255)
        nb_dropped = (len(images) * images.shape[3]) - nb_kept
        assert images_aug.shape == images.shape
        assert images_aug.dtype.name == images.dtype.name
        # on average, keep 1 of 3 channels
        # due to p=1.0 we expect to get exactly 2/3 dropped
        assert np.isclose(nb_dropped,
                          (len(images)*images.shape[3])*(2/3), atol=1)
        # every channel dropped at least once, i.e. which one is kept is random
        assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]

    def test_some_images_below_nb_keep_channels(self):
        """Images with fewer channels than ``nb_keep_channels`` keep them all."""
        image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
        image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
        images = [image_2c if i % 2 == 0 else image_3c
                  for i in sm.xrange(100)]
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
        images_aug = aug(images=images)
        for i, image_aug in enumerate(images_aug):
            assert np.sum(image_aug == 255) == 2
            if i % 2 == 0:
                # 2-channel image: both channels kept, none dropped
                assert np.sum(image_aug == 0) == 0
            else:
                # 3-channel image: 2 kept, 1 dropped
                assert np.sum(image_aug == 0) == 1

    def test_all_images_below_nb_keep_channels(self):
        """If every image has fewer channels than the keep count, nothing drops."""
        image = np.full((1, 1, 2), 255, dtype=np.uint8)
        images = np.array([image] * 100)
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
        images_aug = aug(images=images)
        nb_kept = np.sum(images_aug == 255)
        nb_dropped = (len(images) * images.shape[3]) - nb_kept
        assert nb_dropped == 0

    def test_get_parameters(self):
        """get_parameters() exposes the keep-probability and keep count."""
        aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
        params = aug.get_parameters()
        assert isinstance(params[0], iap.Binomial)
        assert np.isclose(params[0].p.value, 0.3)
        assert params[1] == 2

    def test_zero_sized_axes(self):
        """Degenerate (zero-sized) image shapes must not crash the augmenter."""
        shapes = [
            (0, 0),
            (0, 1),
            (1, 0),
            (0, 1, 0),
            (1, 0, 0),
            (0, 1, 1),
            (1, 0, 1)
        ]
        for shape in shapes:
            with self.subTest(shape=shape):
                image = np.full(shape, 255, dtype=np.uint8)
                aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
                image_aug = aug(image=image)
                assert image_aug.dtype.name == "uint8"
                assert image_aug.shape == image.shape

    def test_other_dtypes_bool(self):
        """bool images: exactly nb_keep_channels channels keep their value."""
        image = np.full((1, 1, 10), 1, dtype=bool)
        aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
        image_aug = aug(image=image)
        assert image_aug.shape == image.shape
        assert image_aug.dtype.name == "bool"
        assert np.sum(image_aug == 1) == 3
        assert np.sum(image_aug == 0) == 7

    def test_other_dtypes_uint_int(self):
        """All uint/int dtypes: kept channels keep their value, others become 0."""
        dts = ["uint8", "uint16", "uint32", "uint64",
               "int8", "int16", "int32", "int64"]
        for dt in dts:
            min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dt)
            values = [min_value, int(center_value), max_value]
            for value in values:
                with self.subTest(dtype=dt, value=value):
                    image = np.full((1, 1, 10), value, dtype=dt)
                    aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
                    image_aug = aug(image=image)
                    assert image_aug.shape == image.shape
                    assert image_aug.dtype.name == dt
                    if value == 0:
                        # dropped and kept channels are indistinguishable
                        assert np.sum(image_aug == value) == 10
                    else:
                        assert np.sum(image_aug == value) == 3
                        assert np.sum(image_aug == 0) == 7

    def test_other_dtypes_float(self):
        """All float dtypes: kept channels keep their value, others become 0.

        NOTE(review): "float128" is not available on all platforms
        (e.g. Windows NumPy builds) — confirm target platforms.
        """
        dts = ["float16", "float32", "float64", "float128"]
        for dt in dts:
            min_value, center_value, max_value = \
                iadt.get_value_range_of_dtype(dt)
            values = [min_value, -10.0, center_value, 10.0, max_value]
            # float16 needs a coarser comparison tolerance
            atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
            _isclose = functools.partial(np.isclose, atol=atol, rtol=0)
            for value in values:
                with self.subTest(dtype=dt, value=value):
                    image = np.full((1, 1, 10), value, dtype=dt)
                    aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
                    image_aug = aug(image=image)
                    assert image_aug.shape == image.shape
                    assert image_aug.dtype.name == dt
                    if _isclose(value, 0.0):
                        # dropped and kept channels are indistinguishable
                        assert np.sum(_isclose(image_aug, value)) == 10
                    else:
                        assert (
                            np.sum(_isclose(image_aug, np.float128(value)))
                            == 3)
                        assert np.sum(image_aug == 0) == 7

    def test_pickleable(self):
        """A pickled+unpickled augmenter must produce identical output."""
        aug = iaa.Dropout2d(p=0.5, random_state=1)
        runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
    """Degenerate (zero-sized) image axes must not crash the augmenter."""
    shapes = [
        (5, 0, 0),
        (5, 0, 1),
        (5, 1, 0),
        (5, 0, 1, 0),
        (5, 1, 0, 0),
        (5, 0, 1, 1),
        (5, 1, 0, 1)
    ]
    for shape in shapes:
        with self.subTest(shape=shape):
            images = np.full(shape, 255, dtype=np.uint8)
            aug = iaa.TotalDropout(1.0)
            images_aug = aug(images=images)
            # Only dtype/shape are checked; there are no pixels to compare.
            assert images_aug.dtype.name == "uint8"
            assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
    """Boolean images: p=1.0 must zero out (False) every value."""
    image = np.full((1, 1, 10), 1, dtype=bool)
    aug = iaa.TotalDropout(p=1.0)
    image_aug = aug(image=image)
    assert image_aug.shape == image.shape
    assert image_aug.dtype.name == "bool"
    # No True values may remain after a total dropout.
    assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
    """Integer dtypes: p=1.0 zeroes all pixels, p=0.0 preserves them."""
    dts = ["uint8", "uint16", "uint32", "uint64",
           "int8", "int16", "int32", "int64"]
    for dt in dts:
        min_value, center_value, max_value = \
            iadt.get_value_range_of_dtype(dt)
        # Probe the extremes and the (rounded) center of each dtype range.
        values = [min_value, int(center_value), max_value]
        for value in values:
            for p in [1.0, 0.0]:
                with self.subTest(dtype=dt, value=value, p=p):
                    images = np.full((5, 1, 1, 3), value, dtype=dt)
                    aug = iaa.TotalDropout(p=p)
                    images_aug = aug(images=images)
                    assert images_aug.shape == images.shape
                    assert images_aug.dtype.name == dt
                    if np.isclose(p, 1.0) or value == 0:
                        # All 5*3 pixels dropped (or were zero already).
                        assert np.sum(images_aug == 0) == 5*3
                    else:
                        # p=0.0: every pixel keeps its original value.
                        assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
    """Float dtypes: p=1.0 zeroes all pixels, p=0.0 preserves them.

    NOTE(review): "float128" is platform-dependent (unavailable on some
    numpy builds, e.g. Windows) — confirm the test environment provides it.
    """
    dts = ["float16", "float32", "float64", "float128"]
    for dt in dts:
        min_value, center_value, max_value = \
            iadt.get_value_range_of_dtype(dt)
        values = [min_value, -10.0, center_value, 10.0, max_value]
        # float16 needs a much looser tolerance than the wider floats.
        atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
        _isclose = functools.partial(np.isclose, atol=atol, rtol=0)
        for value in values:
            for p in [1.0, 0.0]:
                with self.subTest(dtype=dt, value=value, p=p):
                    images = np.full((5, 1, 1, 3), value, dtype=dt)
                    aug = iaa.TotalDropout(p=p)
                    images_aug = aug(images=images)
                    assert images_aug.shape == images.shape
                    assert images_aug.dtype.name == dt
                    if np.isclose(p, 1.0):
                        assert np.sum(_isclose(images_aug, 0.0)) == 5*3
                    else:
                        # Compare at extended precision to avoid rounding
                        # artifacts for the extreme values.
                        assert (
                            np.sum(_isclose(images_aug, np.float128(value)))
                            == 5*3)
def test_pickleable(self):
    """The augmenter must survive pickling and stay deterministic."""
    aug = iaa.TotalDropout(p=0.5, random_state=1)
    runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
    # Reset the global random state so every test runs deterministically.
    reseed()
def test_mul_is_one(self):
    """Multiply(mul=1.0) must be an identity transform for arrays and lists."""
    # no multiply, shouldnt change anything
    base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
    images = np.array([base_img])
    images_list = [base_img]
    aug = iaa.Multiply(mul=1.0)
    aug_det = aug.to_deterministic()
    # Stochastic augmenter, array input.
    observed = aug.augment_images(images)
    expected = images
    assert np.array_equal(observed, expected)
    assert observed.shape == (1, 3, 3, 1)
    # Stochastic augmenter, list input.
    observed = aug.augment_images(images_list)
    expected = images_list
    assert array_equal_lists(observed, expected)
    # Deterministic augmenter, array input.
    observed = aug_det.augment_images(images)
    expected = images
    assert np.array_equal(observed, expected)
    # Deterministic augmenter, list input.
    observed = aug_det.augment_images(images_list)
    expected = images_list
    assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = | np.ones((1, 3, 3, 1), dtype=np.uint8) | numpy.ones |
import sys
import os
import re
import glob
from PIL import Image, ImageDraw
import ocrolib
from re import split
import os.path
import json
import numpy as np
import cv2
import imageio
from ..constants import OCRD_TOOL
from shapely.geometry import MultiPoint
import click
from ocrd.decorators import ocrd_cli_options, ocrd_cli_wrap_processor
import subprocess
from ocrolib import psegutils, morph, sl
from scipy.ndimage.filters import gaussian_filter, uniform_filter, maximum_filter
from ocrd import Processor
from ocrd_modelfactory import page_from_file
from ocrd_utils import (
getLogger,
make_file_id,
assert_file_grp_cardinality,
MIMETYPE_PAGE,
coordinates_for_segment,
points_from_polygon
)
from ocrd_models.ocrd_page import (
to_xml,
AlternativeImageType,
TextRegionType,
CoordsType,
TextLineType
)
TOOL = 'ocrd-anybaseocr-textline'
class OcrdAnybaseocrTextline(Processor):
def __init__(self, *args, **kwargs):
    # Inject this tool's ocrd-tool.json section and version before
    # delegating to the base Processor constructor.
    kwargs['ocrd_tool'] = OCRD_TOOL['tools'][TOOL]
    kwargs['version'] = OCRD_TOOL['version']
    super(OcrdAnybaseocrTextline, self).__init__(*args, **kwargs)
def addzeros(self, file):
    """Append " 0 0 0 0" to every line of *file*, rewriting it in place.

    The element after the final newline produced by splitting (the empty
    trailing string, or an unterminated last line) is dropped, matching
    the original behaviour of ``D[:-1]``.

    :param file: path of the text file to rewrite
    """
    # Context managers fix the original resource leak: the write handle
    # was opened but never closed (and the read handle only sometimes).
    with open(file, "r") as src:
        lines = src.read().split("\n")[:-1]
    with open(file, "w") as dst:
        for line in lines:
            dst.write(line + " 0 0 0 0\n")
def process(self):
    """Segment text lines for each input page and emit updated PAGE-XML.

    Iterates over the workspace's input files, loads the binarized and
    deskewed page image, and runs line segmentation either on the whole
    page or per text region depending on the ``operation_level`` parameter.
    """
    LOG = getLogger('OcrdAnybaseocrTextline')
    assert_file_grp_cardinality(self.input_file_grp, 1)
    assert_file_grp_cardinality(self.output_file_grp, 1)
    oplevel = self.parameter['operation_level']
    for (n, input_file) in enumerate(self.input_files):
        page_id = input_file.pageId or input_file.ID
        pcgts = page_from_file(self.workspace.download_file(input_file))
        self.add_metadata(pcgts)
        page = pcgts.get_Page()
        LOG.info("INPUT FILE %s", input_file.pageId or input_file.ID)
        # Require the preprocessed (binarized + deskewed) image variant.
        page_image, page_xywh, page_image_info = self.workspace.image_from_page(page, page_id, feature_selector='binarized,deskewed')
        if oplevel == 'page':
            LOG.warning("Operation level should be region.")
            # NOTE(review): textregion is passed as None here, but
            # _process_segment immediately calls textregion.get_TextLine();
            # this branch looks like it would raise AttributeError — verify.
            self._process_segment(page_image, page,None, page_xywh, page_id, input_file, n)
        else:
            regions = page.get_TextRegion()
            if not regions:
                LOG.warning("Page '%s' contains no text regions", page_id)
                continue
            for (k, region) in enumerate(regions):
                # Crop each region out of the page image and segment it.
                region_image, region_xywh = self.workspace.image_from_segment(region, page_image, page_xywh)
                self._process_segment(region_image, page, region, region_xywh, region.id, input_file, k)
        # Persist the (possibly modified) PAGE-XML to the output group.
        file_id = make_file_id(input_file, self.output_file_grp)
        pcgts.set_pcGtsId(file_id)
        self.workspace.add_file(
            ID=file_id,
            file_grp=self.output_file_grp,
            pageId=input_file.pageId,
            mimetype=MIMETYPE_PAGE,
            local_filename=os.path.join(self.output_file_grp, file_id + '.xml'),
            content=to_xml(pcgts).encode('utf-8')
        )
def _process_segment(self, page_image, page, textregion, region_xywh, page_id, input_file, n):
    """Segment one region image into text lines and attach them to *textregion*.

    Binarizes/normalizes the image, estimates (or reads) the text scale,
    runs ``compute_segmentation``, sorts the detected lines into reading
    order, crops one denoised image per line, and adds a ``TextLine`` (with
    an ``AlternativeImage``) per line to the region.

    :param page_image: PIL image of the region (binarized, deskewed)
    :param textregion: region element to receive the TextLines
    :param region_xywh: coordinate metadata of the region crop
    :param n: index used to disambiguate saved line-image file names
    """
    LOG = getLogger('OcrdAnybaseocrTextline')
    #check for existing text lines and whether to overwrite them
    if textregion.get_TextLine():
        if self.parameter['overwrite']:
            LOG.info('removing existing TextLines in region "%s"', page_id)
            textregion.set_TextLine([])
        else:
            LOG.warning('keeping existing TextLines in region "%s"', page_id)
            return
    binary = ocrolib.pil2array(page_image)
    if len(binary.shape) > 2:
        # Collapse color channels to grayscale before thresholding.
        binary = np.mean(binary, 2)
    # Invert and normalize to a 0/1 byte image (ink = 1).
    binary = np.array(1-binary/np.amax(binary),'B')
    if self.parameter['scale'] == 0:
        scale = psegutils.estimate_scale(binary)
    else:
        scale = self.parameter['scale']
    # Bail out on implausible scale estimates.
    if np.isnan(scale) or scale > 1000.0 or scale < self.parameter['minscale']:
        LOG.warning(str(scale)+": bad scale; skipping!\n" )
        return
    segmentation = self.compute_segmentation(binary, scale)
    if np.amax(segmentation) > self.parameter['maxlines']:
        LOG.warning("too many lines %i; skipping!\n", (np.amax(segmentation)))
        return
    lines = psegutils.compute_lines(segmentation, scale)
    order = psegutils.reading_order([l.bounds for l in lines])
    lsort = psegutils.topsort(order)
    # renumber the labels so that they conform to the specs
    nlabels = np.amax(segmentation)+1
    renumber = np.zeros(nlabels, 'i')
    for i, v in enumerate(lsort):
        renumber[lines[v].label] = 0x010000+(i+1)
    segmentation = renumber[segmentation]
    lines = [lines[i] for i in lsort]
    cleaned = ocrolib.remove_noise(binary, self.parameter['noise'])
    for i, l in enumerate(lines):
        #LOG.info('check this: ')
        #LOG.info(type(l.bounds))
        #LOG.info(l.bounds)
        #line_points = np.where(l.mask==1)
        #hull = MultiPoint([x for x in zip(line_points[0],line_points[1])]).convex_hull
        #x,y = hull.exterior.coords.xy
        #LOG.info('hull coords x: ',x)
        #LOG.info('hull coords y: ',y)
        # Axis-aligned bounding box of the line, as a 4-point polygon.
        min_x, max_x = (l.bounds[0].start, l.bounds[0].stop)
        min_y, max_y = (l.bounds[1].start, l.bounds[1].stop)
        line_polygon = [[min_x, min_y], [max_x, min_y], [max_x, max_y], [min_x, max_y]]
        #line_polygon = [x for x in zip(y, x)]
        # Convert region-local coordinates back to absolute page coords.
        line_polygon = coordinates_for_segment(line_polygon, page_image, region_xywh)
        line_points = points_from_polygon(line_polygon)
        # Crop the denoised line image and re-binarize it for export.
        img = cleaned[l.bounds[0],l.bounds[1]]
        img = np.array(255*(img>ocrolib.midrange(img)),'B')
        img = 255-img
        img = ocrolib.array2pil(img)
        file_id = make_file_id(input_file, self.output_file_grp)
        file_path = self.workspace.save_image_file(img,
                                                   file_id+"_"+str(n)+"_"+str(i),
                                                   page_id=page_id,
                                                   file_grp=self.output_file_grp
                                                   )
        ai = AlternativeImageType(filename=file_path, comments=region_xywh['features'])
        line_id = '%s_line%04d' % (page_id, i)
        line = TextLineType(custom='readingOrder {index:'+str(i)+';}', id=line_id, Coords=CoordsType(line_points))
        line.add_AlternativeImage(ai)
        textregion.add_TextLine(line)
        #line_test = textregion.get_TextLine()[-1]
        #region_img, region_xy = self.workspace.image_from_segment(line_test, page_image, region_xywh)
        #region_img.save('checkthis.png')
        #cv2.imwrite('checkthis.jpg', region_img)
def B(self, a):
    """Return *a* as an unsigned-byte ('B') numpy array, without copying
    when it already has that dtype.

    :param a: numpy array of any dtype
    :return: the same array if already uint8/'B', else a converted copy
    """
    # Fix: the original compared against bare `dtype('B')`, which is an
    # unresolved name in this module (numpy is only imported as `np`),
    # so the method always raised NameError.
    if a.dtype == np.dtype('B'):
        return a
    return np.array(a, 'B')
################################################################
# Column finding.
###
# This attempts to find column separators, either as extended
# vertical black lines or extended vertical whitespace.
# It will work fairly well in simple cases, but for unusual
# documents, you need to tune the parameter.
################################################################
def compute_separators_morph(self, binary, scale):
    """Finds vertical black lines corresponding to column separators.

    Dilates the ink, opens vertically to keep only tall structures, then
    erodes and filters the candidates by extent, keeping at most
    ``maxseps`` separators.

    :param binary: 0/1 byte image (ink = 1)
    :param scale: estimated text scale in pixels
    :return: binary mask of the selected vertical separators
    """
    d0 = int(max(5, scale/4))
    d1 = int(max(5, scale))+self.parameter['sepwiden']
    thick = morph.r_dilation(binary, (d0, d1))
    # Keep only structures at least 10*scale tall.
    vert = morph.rb_opening(thick, (10*scale, 1))
    vert = morph.r_erosion(vert, (d0//2, self.parameter['sepwiden']))
    # First prune by width, then keep the tallest `maxseps` candidates.
    vert = morph.select_regions(vert, sl.dim1, min=3, nbest=2*self.parameter['maxseps'])
    vert = morph.select_regions(vert, sl.dim0, min=20*scale, nbest=self.parameter['maxseps'])
    return vert
def compute_colseps_morph(self, binary, scale, maxseps=3, minheight=20, maxwidth=5):
    """Finds extended vertical whitespace corresponding to column separators
    using morphological operations.

    :param binary: 0/1 byte image (ink = 1)
    :param scale: estimated text scale in pixels
    :param maxseps: unused here; kept for interface compatibility
    :param minheight: unused here; kept for interface compatibility
    :param maxwidth: unused here; kept for interface compatibility
    :return: binary mask of candidate column separators
    """
    boxmap = psegutils.compute_boxmap(binary, scale, (0.4, 5), dtype='B')
    bounds = morph.rb_closing(self.B(boxmap), (int(5*scale), int(5*scale)))
    # Fix: bare `maximum` was an unresolved name (presumably a leftover
    # from a star import in the upstream ocrolib code); use the numpy
    # elementwise maximum instead.
    # NOTE(review): `bounds` is never used after this line — confirm
    # whether it was meant to feed into the `cols` computation.
    bounds = np.maximum(self.B(1-bounds), self.B(boxmap))
    cols = 1-morph.rb_closing(boxmap, (int(20*scale), int(scale)))
    cols = morph.select_regions(cols, sl.aspect, min=self.parameter['csminaspect'])
    cols = morph.select_regions(cols, sl.dim0, min=self.parameter['csminheight']*scale, nbest=self.parameter['maxcolseps'])
    cols = morph.r_erosion(cols, (int(0.5+scale), 0))
    cols = morph.r_dilation(cols, (int(0.5+scale), 0), origin=(int(scale/2)-1, 0))
    return cols
def compute_colseps_conv(self, binary, scale=1.0):
"""Find column separators by convoluation and
thresholding."""
h, w = binary.shape
# find vertical whitespace by thresholding
smoothed = gaussian_filter(1.0*binary, (scale, scale*0.5))
smoothed = uniform_filter(smoothed, (5.0*scale, 1))
thresh = (smoothed < np.amax(smoothed)*0.1)
# find column edges by filtering
grad = gaussian_filter(1.0*binary, (scale, scale*0.5), order=(0, 1))
grad = uniform_filter(grad, (10.0*scale, 1))
grad = (grad > 0.25* | np.amax(grad) | numpy.amax |
''' Testing track_metrics module '''
from StringIO import StringIO
import numpy as np
from nose.tools import assert_true, assert_false, assert_equal, assert_almost_equal
from numpy.testing import assert_array_equal, assert_array_almost_equal
from dipy.tracking import metrics as tm
from dipy.tracking import distances as pf
def test_splines():
    """B-spline smoothing of a noisy helix should run without error.

    NOTE(review): the smoothed result is never asserted on — this is a
    smoke test only.
    """
    #create a helix
    t=np.linspace(0,1.75*2*np.pi,100)
    x = np.sin(t)
    y = np.cos(t)
    z = t
    # add noise
    x+= np.random.normal(scale=0.1, size=x.shape)
    y+= np.random.normal(scale=0.1, size=y.shape)
    z+= np.random.normal(scale=0.1, size=z.shape)
    xyz=np.vstack((x,y,z)).T
    # get the B-splines smoothed result
    xyzn=tm.spline(xyz,3,2,-1)
def test_minimum_distance():
    """Closest distance between two collinear 2-point tracks is 1.0."""
    xyz1=np.array([[1,0,0],[2,0,0]],dtype='float32')
    xyz2=np.array([[3,0,0],[4,0,0]],dtype='float32')
    assert_equal(pf.minimum_closest_distance(xyz1,xyz2), 1.0)
def test_segment_intersection():
    """intersect_sphere: track/sphere intersection for three configurations."""
    # Sphere far from the short track: no intersection.
    xyz=np.array([[1,1,1],[2,2,2],[2,2,2]])
    center=[10,4,10]
    radius=1
    assert_equal(tm.intersect_sphere(xyz,center,radius), False)
    # Larger sphere, still out of reach of the diagonal track.
    xyz=np.array([[1,1,1],[2,2,2],[3,3,3],[4,4,4]])
    center=[10,10,10]
    radius=2
    assert_equal( tm.intersect_sphere(xyz,center,radius), False)
    # Sphere centered near a track point: intersects.
    xyz=np.array([[1,1,1],[2,2,2],[3,3,3],[4,4,4]])
    center=[2.1,2,2.2]
    radius=2
    assert_equal( tm.intersect_sphere(xyz,center,radius), True)
def test_most_similar_mam():
    """Smoke test: most_similar_track_mam runs for all three MAM metrics."""
    xyz1 = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]],dtype='float32')
    xyz2 = np.array([[0,1,1],[1,0,1],[2,3,-2]],dtype='float32')
    xyz3 = np.array([[-1,0,0],[2,0,0],[2,3,0],[3,0,0]],dtype='float32')
    tracks=[xyz1,xyz2,xyz3]
    for metric in ('avg', 'min', 'max'):
        #pf should be much faster and the results equivalent
        si2,s2=pf.most_similar_track_mam(tracks,metric=metric)
def test_bundles_distances_mam():
    """Smoke test: bundle-to-bundle MAM distance matrix for all metrics."""
    xyz1A = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]],dtype='float32')
    xyz2A = np.array([[0,1,1],[1,0,1],[2,3,-2]],dtype='float32')
    xyz1B = np.array([[-1,0,0],[2,0,0],[2,3,0],[3,0,0]],dtype='float32')
    tracksA = [xyz1A, xyz2A]
    tracksB = [xyz1B, xyz1A, xyz2A]
    for metric in ('avg', 'min', 'max'):
        # NOTE(review): the 2x3 distance matrix is never asserted on.
        DM2 = pf.bundles_distances_mam(tracksA, tracksB, metric=metric)
def test_mam_distances():
    """mam_distances must return the known average distance for two tracks."""
    xyz1 = np.array([[0,0,0],[1,0,0],[2,0,0],[3,0,0]])
    xyz2 = np.array([[0,1,1],[1,0,1],[2,3,-2]])
    # dm=array([[ 2, 2, 17], [ 3, 1, 14], [6, 2, 13], [11, 5, 14]])
    # this is the distance matrix between points of xyz1
    # and points of xyz2
    xyz1=xyz1.astype('float32')
    xyz2=xyz2.astype('float32')
    zd2 = pf.mam_distances(xyz1,xyz2)
    # First element is the 'avg' metric value.
    assert_almost_equal( zd2[0], 1.76135602742)
def test_approx_ei_traj():
segs=100
t=np.linspace(0,1.75*2*np.pi,segs)
x =t
y=5*np.sin(5*t)
z=np.zeros(x.shape)
xyz= | np.vstack((x,y,z)) | numpy.vstack |
import numpy as np
def p(u):
    """Map an axis-angle vector *u* to the rotated unit-z direction.

    Builds the half-angle sine/cosine of the rotation encoded by *u*
    (angle = |u|, axis = u/|u|) and evaluates the three components of the
    rotated vector in closed form.
    """
    mag = np.linalg.norm(u)
    half = mag * 0.5
    s = np.sin(half)
    c = np.cos(half)
    k = 2.0 * s / mag**2
    return np.array([
        k * (u[1] * mag * c + u[0] * u[2] * s),
        k * (-u[0] * mag * c + u[1] * u[2] * s),
        c**2 + (s**2 / mag**2) * (u[2]**2 - u[1]**2 - u[0]**2),
    ])
axis = np.array( [0,1,0] )
angle = np.pi/2
u = angle*axis;
M = np.zeros((3,3))
h = 1e-6
for i in range(3):
for j in range(3):
u[i] += h
pp = p(u)
u[i] -= 2*h
pm = p(u)
M[i,j] = 0.5*(pp[j] - pm[j])/h
u[i] += h
print('Numerical derivative:')
print(M)
U = np.linalg.norm(u)
A = U*0.5;
Q = np.array( [ np.cos(A), u[0]/U*np.sin(A), u[1]/U*np.sin(A),
u[2]/U*np.sin(A) ] )
q0, q1, q2, q3 = Q
dpdQ = 2*np.array( [[q2, -q1, q0],
[q3, -q0, -q1],
[q0, q3, -q2],
[q1, q2, q3]] )
dQdu = np.zeros((3,4))
dQdu[:,0] = -0.5*np.sin(A)*u/U
dQdu[:,1:] = np.sin(A)* | np.eye(3) | numpy.eye |
import pandas as pd
from DataHandler.mongoObjects import CollectionManager
from datetime import date
from datetime import datetime as dt
import datedelta
import numpy as np
features = ['Asset Growth', 'Book Value per Share Growth', 'Debt Growth', 'Dividends per Basic Common Share Growth',
'EBIT Growth', 'EPS Diluted Growth', 'EPS Growth', 'Gross Profit Growth', 'Inventory Growth',
'Net Income Growth',
'Operating Cash Flow Growth', 'Trade and Non-Trade Receivables Growth']
def add_fundamentals_to_db():
    """
    Adds the fundamental data to the database from a json file.

    Reads the combined-fundamentals JSON into a DataFrame and inserts one
    document per row into the '10y_Fundamentals' collection.
    :return: None
    """
    fundFile = 'sectorAnalysis/fundamentals/combinedFundamentals.json'
    funds = pd.read_json(fundFile)
    manager = CollectionManager('10y_Fundamentals', 'AlgoTradingDB')
    for index, row in funds.iterrows():
        document = row.to_dict()
        manager.insert(document, is_dictionary=True)
    manager.close()
def json_serial(obj):
    """JSON serializer for objects not serializable by default json code.

    Generalized to accept both ``datetime`` and ``date`` instances
    (``datetime`` behaviour is unchanged); each is rendered via its
    ISO-8601 ``isoformat()``. Anything else raises ``TypeError``, matching
    the contract expected by ``json.dumps(default=...)``.
    """
    if isinstance(obj, (dt, date)):
        return obj.isoformat()
    raise TypeError("Type %s not serializable" % type(obj))
def get_next_trading_day(dates, days: list):
    """
    Map each requested day to itself or the next day present in *dates*.

    :param dates: collection of dates on which trading data exists
    :param days: requested dates (e.g. announcement dates)
    :return: list of dates, each advanced day-by-day until found in `dates`

    NOTE(review): this loops forever if a requested day falls after the
    last date in `dates` — confirm callers never pass such dates.
    """
    newDates = []
    for day in days:
        day = day
        while day not in dates:
            day += datedelta.DAY
        newDates.append(day)
    return newDates
def calculate_performance(ticker, dates1: list, dates2: list):
    """
    Gets how much the stock has changed between paired dates.

    :param ticker: stock ticker
    :param dates1: beginnings of the quarters
    :param dates2: ends of the quarters
    :return: list of fractional changes, one per date pair

    NOTE(review): the formula is (start - end) / start, so a *positive*
    value means the price *fell* over the quarter — confirm this sign
    convention is what downstream code expects.
    """
    ticker = ticker.lower()
    manager = CollectionManager('5Y_technicals', 'AlgoTradingDB')
    prices = manager.find({'ticker': ticker})
    dates = [dt.strptime(priceDate, '%Y-%m-%d').date() for priceDate in prices['date']]
    # Snap each requested date to the next actual trading day, then look
    # up the volume-weighted average price (vwap) on that day.
    pricesStart = [prices[prices['date'] == str(d1)]['vwap'].values[0] for d1 in get_next_trading_day(dates, dates1)]
    pricesEnd = [prices[prices['date'] == str(d2)]['vwap'].values[0] for d2 in get_next_trading_day(dates, dates2)]
    manager.close()
    performances = [((p[0] - p[1]) / p[0]) for p in zip(pricesStart, pricesEnd)]
    return performances
def get_historical_fundamentals(ticker: str, d: date, manager: CollectionManager):
    """
    Gets all of the fundamental data for a ticker before and after a date.

    :param ticker: stock ticker
    :param d: split date
    :param manager: collection manager for the fundamentals database
    :return: (past fundamentals restricted to `features`,
              announcement dates of those rows,
              future (test) fundamentals)
    """
    current_day = dt(d.year, d.month, d.day)
    # $lte / $gte both include `d` itself, so a row dated exactly `d`
    # appears in both the train and test frames.
    allTickersFundamentals = manager.find({'ticker': ticker, 'date': {'$lte': current_day}}).sort_values('date')
    test = manager.find({'ticker': ticker, 'date': {'$gte': current_day}}).sort_values('date')
    return allTickersFundamentals[features], [announce.date() for announce in
                                              allTickersFundamentals['date'].tolist()], test
def find_best_stock(performances):
    """For every row of *performances*, pick the column (ticker) whose
    value has the largest absolute magnitude.

    :param performances: DataFrame with one column per stock
    :return: list with one ticker symbol per row (first column wins ties)
    """
    tickers = performances.columns.values
    return [
        tickers[np.argmax(np.abs(row.values))]
        for _, row in performances.iterrows()
    ]
def get_all_fundamentals(stocks: list, tradeDate: date):
"""
Gets all of the fundamentals for a list of tickers and list of quarters
:param tickers: stocks
:param quarters: list of quarters
:param final: whether this is a final prediction
:return: Xs and ys
"""
manager = CollectionManager('10y_Fundamentals', 'AlgoTradingDB')
tickers_set = set(stocks)
all_fundamental_tickers = set(manager.find({})["ticker"])
tickers = list(tickers_set.intersection(all_fundamental_tickers))
allFundamentals = pd.DataFrame()
performances = pd.DataFrame()
quarters = 17
allTest = pd.DataFrame()
testDates = []
for ticker in tickers:
data, announcementDates, test = get_historical_fundamentals(ticker, tradeDate, manager)
nextAnnouncementDates = announcementDates[1:] + [dt.strptime('2018-02-05', '%Y-%m-%d').date()]
performance = calculate_performance(ticker, announcementDates, nextAnnouncementDates)
if len(testDates) == 0:
testDates = test['date'].tolist()
if len(performance) != 17:
performance = performance[len(performance) - 17:]
performances[ticker] = performance
else:
performances[ticker] = performance
for index, funds in data.iterrows():
tempDF = pd.DataFrame()
tempDF['fundamentals'] = list(funds)[:-1]
tempDF['ticker'] = [ticker for i in range(len(funds) - 1)]
tempDF['quarter'] = [index for j in range(len(funds) - 1)]
allFundamentals = pd.concat([allFundamentals, tempDF])
for index, testFunds in test.iterrows():
temp = pd.DataFrame()
temp['fundamentals'] = list(testFunds)[:-1]
temp['ticker'] = [ticker for k in range(len(testFunds) - 1)]
temp['quarter'] = [index for l in range(len(testFunds) - 1)]
allTest = pd.concat([allTest, temp])
manager.close()
trainingData = []
for quarter in range(quarters):
q = []
for ticker in tickers:
tickerdata = allFundamentals[allFundamentals['ticker'] == ticker]
quarterdata = tickerdata[tickerdata['quarter'] == quarter]['fundamentals']
q.append(quarterdata.tolist())
trainingData.append(np.array(q))
trainingDataX = | np.array(trainingData) | numpy.array |
import pandas as pd
import datetime as datetime
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.smoothers_lowess import lowess
"""
cgmquantify package
Description:
The cgmquantify package is a comprehensive library for computing metrics from continuous glucose monitors.
Requirements:
pandas, datetime, numpy, matplotlib, statsmodels
Functions:
importdexcom(): Imports data from Dexcom continuous glucose monitor devices
interdaycv(): Computes and returns the interday coefficient of variation of glucose
interdaysd(): Computes and returns the interday standard deviation of glucose
intradaycv(): Computes and returns the intraday coefficient of variation of glucose
intradaysd(): Computes and returns the intraday standard deviation of glucose
TIR(): Computes and returns the time in range
TOR(): Computes and returns the time outside range
PIR(): Computes and returns the percent time in range
POR(): Computes and returns the percent time outside range
MGE(): Computes and returns the mean of glucose outside specified range
MGN(): Computes and returns the mean of glucose inside specified range
MAGE(): Computes and returns the mean amplitude of glucose excursions
J_index(): Computes and returns the J-index
LBGI(): Computes and returns the low blood glucose index
HBGI(): Computes and returns the high blood glucose index
ADRR(): Computes and returns the average daily risk range, an assessment of total daily glucose variations within risk space
MODD(): Computes and returns the mean of daily differences. Examines mean of value + value 24 hours before
CONGA24(): Computes and returns the continuous overall net glycemic action over 24 hours
GMI(): Computes and returns the glucose management index
eA1c(): Computes and returns the American Diabetes Association estimated HbA1c
summary(): Computes and returns glucose summary metrics, including interday mean glucose, interday median glucose, interday minimum glucose, interday maximum glucose, interday first quartile glucose, and interday third quartile glucose
plotglucosesd(): Plots glucose with specified standard deviation lines
plotglucosebounds(): Plots glucose with user-defined boundaries
plotglucosesmooth(): Plots smoothed glucose plot (with LOWESS smoothing)
"""
def importdexcom(filename):
    """
    Imports data from Dexcom continuous glucose monitor devices

    Args:
        filename (String): path to file
    Returns:
        (pd.DataFrame): dataframe of data with Time, Glucose, and Day columns
    """
    data = pd.read_csv(filename)
    df = pd.DataFrame()
    df['Time'] = data['Timestamp (YYYY-MM-DDThh:mm:ss)']
    df['Glucose'] = pd.to_numeric(data['Glucose Value (mg/dL)'])
    # Drop the first 12 rows — presumably device metadata/calibration rows
    # in the Dexcom export; confirm against the export format.
    df.drop(df.index[:12], inplace=True)
    df['Time'] = pd.to_datetime(df['Time'], format='%Y-%m-%dT%H:%M:%S')
    df['Day'] = df['Time'].dt.date
    # reset_index() keeps the old index as an 'index' column.
    df = df.reset_index()
    return df
def importfreestylelibre(filename):
    """
    Imports data from Abbott FreeStyle Libre continuous glucose monitor devices

    Args:
        filename (String): path to file
    Returns:
        (pd.DataFrame): dataframe of data with Time, Glucose, and Day columns
    """
    data = pd.read_csv(filename, header=1, parse_dates=['Device Timestamp'])
    df = pd.DataFrame()
    # Record Type 0 holds the automatically logged ("historic") readings;
    # other record types (manual scans, notes) are excluded.
    historic_id = 0
    df['Time'] = data.loc[data['Record Type'] == historic_id, 'Device Timestamp']
    df['Glucose'] = pd.to_numeric(data.loc[data['Record Type'] == historic_id, 'Historic Glucose mg/dL'])
    df['Day'] = df['Time'].dt.date
    return df
def interdaycv(df):
    """
    Computes and returns the interday coefficient of variation of glucose

    Args:
        (pd.DataFrame): dataframe of data with DateTime, Time and Glucose columns
    Returns:
        cvx (float): interday coefficient of variation, in percent
    """
    glucose = df['Glucose']
    return (np.std(glucose) / np.mean(glucose)) * 100
def interdaysd(df):
    """
    Computes and returns the interday standard deviation of glucose

    Args:
        (pd.DataFrame): dataframe of data with DateTime, Time and Glucose columns
    Returns:
        (float): standard deviation of the Glucose column (population std)
    """
    return np.std(df['Glucose'])
def intradaycv(df):
    """
    Computes and returns the intraday coefficient of variation of glucose

    Args:
        (pd.DataFrame): dataframe of data with DateTime, Time and Glucose columns
    Returns:
        intradaycv_mean (float): intraday CV averaged over all days
        intradaycv_median (float): intraday CV median over all days
        intradaycv_sd (float): intraday CV standard deviation over all days
    """
    # One interday-CV value per calendar day, then summary statistics.
    per_day = [interdaycv(df[df['Day'] == day]) for day in pd.unique(df['Day'])]
    return np.mean(per_day), np.median(per_day), np.std(per_day)
def intradaysd(df):
    """
    Computes and returns the intraday standard deviation of glucose

    Args:
        (pd.DataFrame): dataframe of data with DateTime, Time and Glucose columns
    Returns:
        intradaysd_mean (float): intraday standard deviation averaged over all days
        intradaysd_median (float): intraday standard deviation median over all days
        intradaysd_sd (float): intraday standard deviation standard deviation over all days
    """
    intradaysd = []
    for i in pd.unique(df['Day']):
        # Fix: take the std of the Glucose column only. The original applied
        # np.std to the whole per-day DataFrame (all columns, including
        # Day/Time), which is inconsistent with intradaycv and wrong.
        intradaysd.append(np.std(df[df['Day'] == i]['Glucose']))
    intradaysd_mean = np.mean(intradaysd)
    intradaysd_median = np.median(intradaysd)
    intradaysd_sd = np.std(intradaysd)
    return intradaysd_mean, intradaysd_median, intradaysd_sd
def TIR(df, sd=1, sr=5):
    """
    Computes and returns the time in range

    Args:
        (pd.DataFrame): dataframe of data with DateTime, Time and Glucose columns
        sd (integer): standard deviation for computing range (default=1)
        sr (integer): sampling rate (default=5 minutes between readings)
    Returns:
        (int): time in range, units=minutes
    """
    glucose = df['Glucose']
    mu = np.mean(glucose)
    sigma = np.std(glucose)
    upper, lower = mu + sd * sigma, mu - sd * sigma
    # Samples inside [lower, upper] (inclusive), times minutes per sample.
    in_range = (glucose <= upper) & (glucose >= lower)
    return len(glucose[in_range]) * sr
def TOR(df, sd=1, sr=5):
    """
    Computes and returns the time outside range

    Args:
        (pd.DataFrame): dataframe of data with DateTime, Time and Glucose columns
        sd (integer): standard deviation for computing range (default=1)
        sr (integer): sampling rate (default=5 minutes between readings)
    Returns:
        (int): time outside range, units=minutes
    """
    glucose = df['Glucose']
    mu = np.mean(glucose)
    sigma = np.std(glucose)
    upper, lower = mu + sd * sigma, mu - sd * sigma
    # Samples at or beyond either bound, times minutes per sample.
    outside = (glucose >= upper) | (glucose <= lower)
    return len(glucose[outside]) * sr
def POR(df, sd=1, sr=5):
    """
    Computes and returns the percent time outside range

    Args:
        (pd.DataFrame): dataframe of data with DateTime, Time and Glucose columns
        sd (integer): standard deviation for computing range (default=1)
        sr (integer): sampling rate (default=5 minutes between readings)
    Returns:
        (float): percent time outside range, units=%
    """
    glucose = df['Glucose']
    mu = np.mean(glucose)
    sigma = np.std(glucose)
    upper, lower = mu + sd * sigma, mu - sd * sigma
    minutes_outside = len(glucose[(glucose >= upper) | (glucose <= lower)]) * sr
    # Fraction of total recorded minutes, expressed as a percentage.
    return (minutes_outside / (len(df) * sr)) * 100
def PIR(df, sd=1, sr=5):
"""
Computes and returns the percent time inside range
Args:
(pd.DataFrame): dataframe of data with DateTime, Time and Glucose columns
sd (integer): standard deviation for computing range (default=1)
sr (integer): sampling rate (default=5[minutes, once every 5 minutes glucose is recorded])
Returns:
PIR (float): percent time inside range, units=%
"""
up = np.mean(df['Glucose']) + sd*np.std(df['Glucose'])
dw = | np.mean(df['Glucose']) | numpy.mean |
import turtle
import numpy as np
import random
from random import randint
class branch():
    """One line segment of a procedurally drawn tree, rendered with turtle.

    NOTE(review): every branch instance creates its own Screen and Turtle;
    presumably a single shared screen/turtle was intended — verify against
    the calling code before changing.
    """
    def __init__(self, x, x2, y, y2):
        # Segment endpoints: (x, y) -> (x2, y2).
        self.x = x
        self.y = y
        self.x2 = x2
        self.y2 = y2
        # Growth bookkeeping used by the tree-building algorithm.
        self.grow_count = 0
        self.grow_x = 0
        self.grow_y = 0
        self.width = 1
        # Child branches attached to this one.
        self.child = []
        # Rendering state (see NOTE above about per-instance screens).
        self.screen = turtle.Screen()
        self.screen.setup(width=84, height=84)
        self.screen.bgcolor('black')
        self.tree = turtle.Turtle()
        self.tree.hideturtle()
        self.tree.color('green')
        self.tree.speed(0)
        self.tree.pensize(2)
    def plot(self):
        """Draw this segment: pen up to the start, pen down to the end."""
        self.tree.penup()
        #self.tree.hideturtle()
        self.tree.goto(self.x, self.y) # make the turtle go to the start position
        self.tree.pendown()
        self.tree.goto(self.x2, self.y2)
        self.screen.update()
def draw(x, y, mindist, maxdist, branches):
for i in range(len(x) - 1, 0, -1):
closest_branch = 0
dist = 109
for j in range(len(branches)):
temp_dist = np.sqrt((x[i] - branches[j].x2) ** 2 + (y[i] - branches[j].y2) ** 2)
if temp_dist < dist:
dist = temp_dist
closest_branch = j
# removes scatter
if dist < mindist:
x = np.delete(x, i)
y = | np.delete(y, i) | numpy.delete |
import argparse
import math
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, 'utils'))
import provider
import tf_util
""" argument parser """
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='model', help='Model name: pointnet_cls or pointnet_cls_basic [default: pointnet_cls]')
parser.add_argument('--log_dir', default='log', help='Log dir [default: log]')
parser.add_argument('--num_point', type=int, default=512, help='Point Number [256/512/1024/2048] [default: 1024]')
parser.add_argument('--max_epoch', type=int, default=250, help='Epoch to run [default: 250]')
parser.add_argument('--batch_size', type=int, default=16, help='Batch Size during training [default: 16]')
parser.add_argument('--learning_rate', type=float, default=0.001, help='Initial learning rate [default: 0.001]')
parser.add_argument('--momentum', type=float, default=0.9, help='Initial learning rate [default: 0.9]')
parser.add_argument('--optimizer', default='adam', help='adam or momentum [default: adam]')
parser.add_argument('--decay_step', type=int, default=200000, help='Decay step for lr decay [default: 200000]')
parser.add_argument('--decay_rate', type=float, default=0.7, help='Decay rate for lr decay [default: 0.8]')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
MAX_EPOCH = FLAGS.max_epoch
BASE_LEARNING_RATE = FLAGS.learning_rate
GPU_INDEX = FLAGS.gpu
MOMENTUM = FLAGS.momentum
OPTIMIZER = FLAGS.optimizer
DECAY_STEP = FLAGS.decay_step
DECAY_RATE = FLAGS.decay_rate
""" model and training file """
MODEL = importlib.import_module(FLAGS.model) # import network module
MODEL_FILE = os.path.join(BASE_DIR, FLAGS.model+'.py')
LOG_DIR = FLAGS.log_dir
if not os.path.exists(LOG_DIR): os.mkdir(LOG_DIR)
os.system('cp %s %s' % (MODEL_FILE, LOG_DIR)) # bkp of model def
os.system('cp train.py %s' % (LOG_DIR)) # bkp of train procedure
""" log """
LOG_FOUT = open(os.path.join(LOG_DIR, 'log_train.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
""" learning parameters """
MAX_NUM_POINT = 512
NUM_CLASSES = 8
BN_INIT_DECAY = 0.5
BN_DECAY_DECAY_RATE = 0.5
BN_DECAY_DECAY_STEP = float(DECAY_STEP)
BN_DECAY_CLIP = 0.99
HOSTNAME = socket.gethostname()
""" import train/test data set """
TRAIN_FILES = provider.getDataAllFiles( \
os.path.abspath(os.path.join(BASE_DIR, '../data/train')))
TEST_FILES = provider.getDataAllFiles(\
os.path.abspath(os.path.join(BASE_DIR, '../data/test')))
def log_string(out_str):
    """Append a line to the training log file and echo it to stdout."""
    LOG_FOUT.write(out_str+'\n')
    # Flush immediately so logs survive a crash mid-epoch.
    LOG_FOUT.flush()
    print(out_str)
def get_learning_rate(batch):
    """Return the exponentially decayed learning-rate tensor for `batch`.

    Decays BASE_LEARNING_RATE by DECAY_RATE every DECAY_STEP samples
    (staircase), clipped below at 1e-5.
    """
    learning_rate = tf.train.exponential_decay(
                        BASE_LEARNING_RATE,  # Base learning rate.
                        batch * BATCH_SIZE,  # Current index into the dataset.
                        DECAY_STEP,          # Decay step.
                        DECAY_RATE,          # Decay rate.
                        staircase=True)
    learning_rate = tf.maximum(learning_rate, 0.00001) # CLIP THE LEARNING RATE!
    return learning_rate
def get_bn_decay(batch):
    """Batch-norm decay schedule for the given global-step tensor.

    The BN momentum decays exponentially, so the returned decay
    (1 - momentum) grows over training, clipped at BN_DECAY_CLIP.
    """
    bn_momentum = tf.train.exponential_decay(
        BN_INIT_DECAY,
        batch * BATCH_SIZE,
        BN_DECAY_DECAY_STEP,
        BN_DECAY_DECAY_RATE,
        staircase=True)
    complement = 1 - bn_momentum
    return tf.minimum(BN_DECAY_CLIP, complement)
def train():
    """Build the TF-1.x graph, then run MAX_EPOCH epochs of train/eval.

    Builds model, loss, accuracy and optimizer ops on the selected GPU,
    opens a session with summary writers under LOG_DIR, and checkpoints
    every 10 epochs via log_string().
    """
    with tf.Graph().as_default():
        with tf.device('/gpu:'+str(GPU_INDEX)):
            pointclouds_pl, labels_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
            is_training_pl = tf.placeholder(tf.bool, shape=())
            print(is_training_pl)
            # Note the global_step=batch parameter to minimize.
            # That tells the optimizer to helpfully increment the 'batch' parameter for you every time it trains.
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            tf.summary.scalar('bn_decay', bn_decay)
            # Get model and loss
            pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, bn_decay=bn_decay)
            loss = MODEL.get_loss(pred, labels_pl, end_points)
            tf.summary.scalar('loss', loss)
            # Per-batch accuracy: fraction of argmax predictions matching labels.
            correct = tf.equal(tf.argmax(pred, 1), tf.to_int64(labels_pl))
            accuracy = tf.reduce_sum(tf.cast(correct, tf.float32)) / float(BATCH_SIZE)
            tf.summary.scalar('accuracy', accuracy)
            # Get training operator
            learning_rate = get_learning_rate(batch)
            tf.summary.scalar('learning_rate', learning_rate)
            if OPTIMIZER == 'momentum':
                optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=MOMENTUM)
            elif OPTIMIZER == 'adam':
                optimizer = tf.train.AdamOptimizer(learning_rate)
            train_op = optimizer.minimize(loss, global_step=batch)
            # Add ops to save and restore all the variables.
            saver = tf.train.Saver()
        # Create a session
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        # Add summary writers
        #merged = tf.merge_all_summaries()
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'train'),
                                  sess.graph)
        test_writer = tf.summary.FileWriter(os.path.join(LOG_DIR, 'test'))
        # Init variables
        init = tf.global_variables_initializer()
        # To fix the bug introduced in TF 0.12.1 as in
        # http://stackoverflow.com/questions/41543774/invalidargumenterror-for-tensor-bool-tensorflow-0-12-1
        #sess.run(init)
        sess.run(init, {is_training_pl: True})
        # Handles shared with train_one_epoch / eval_one_epoch.
        ops = {'pointclouds_pl': pointclouds_pl,
               'labels_pl': labels_pl,
               'is_training_pl': is_training_pl,
               'pred': pred,
               'loss': loss,
               'train_op': train_op,
               'merged': merged,
               'step': batch}
        for epoch in range(MAX_EPOCH):
            log_string('**** EPOCH %03d ****' % (epoch))
            sys.stdout.flush()
            train_one_epoch(sess, ops, train_writer)
            eval_one_epoch(sess, ops, test_writer)
            # Save the variables to disk.
            if epoch % 10 == 0:
                save_path = saver.save(sess, os.path.join(LOG_DIR, "model.ckpt"))
                log_string("Model saved in file: %s" % save_path)
def train_one_epoch(sess, ops, train_writer):
""" ops: dict mapping from string to tf ops """
is_training = True
# Shuffle train files
train_file_idxs = np.arange(0, len(TRAIN_FILES))
np.random.shuffle(train_file_idxs)
for fn in range(len(TRAIN_FILES)):
log_string('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TRAIN_FILES[train_file_idxs[fn]])
current_data = current_data[:,0:NUM_POINT,:]
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = | np.squeeze(current_label) | numpy.squeeze |
# from __future__ import division
#-------------------------------------
#
# Started at 06/08/2018 (YuE)
#
# This script based on the previous script
# threeApproachesComparison_v6.py
#
## Upgraded version of python (python3.4): script was rewritten to take into
# account some differences in the descriptions and using of some functions
# (version cma_v3 and more earlier scripts are written under python2).
#
# 07/24/2018: IT IS NOT FINISHED:
#
# Which are still unsatisfactory:
# 1) the absolute values of frictional forces for all methods of calculation,
# 2) their dependence on the ion velocity.
#
# But nevertheless, the dependences of the transmitted energy on the impact
# parameter are close to the inverse quadratic (as it should be!) at all velocities.
#
# 07/27/2018: IT IS NOT FINISHED:
#
# Which are still unsatisfactory:
# 1) the absolute values of frictional forces for all methods of calculation,
# 2) their dependence on the ion velocity.
# The investigation of that is in progress.
#
# Some features were improved, some figures were corrected.
#
#-------------------------------------
#========================================================
#
# This code compares two approaches: "classical" (from [1]) and
# "magnus" (from [2]).
#
# For "classical" approach the magnetized interaction between ion
# and electron is considered for ion velocities V_i > rmsTrnsvVe.
#
# References:
#
# [1] <NAME>, <NAME>, <NAME>, <NAME>.
# "Physics guide of BETACOOL code. Version 1.1". C-A/AP/#262, November
# 2006, Brookhaven National Laboratory, Upton, NY 11973.
# [2] <NAME>, <NAME>. "New Algorithm for Dynamical Friction
# of Ions in a Magnetized Electron Beam". AIP Conf. Proc. 1812, 05006 (2017).
#
#========================================================
#########################################################
#
# Main issues of the calculations:
#
# 1) Friction force (FF) is calculated in the (P)article (R)est (F)rame,
# i.e. in the frame moving together with both (cooled and cooling)
# beams at a velocity V0;
# 2) Friction force is calculated for each value of ion velocity
# in the interval from .1*rmsTrnsvVe till 10*rmsTrnsvVe;
# 3) Initially it is assumed that all electrons have a longitudinal
#    velocity rmsLongVe and transversal velocity rmsTrnsvVe;
# 4) For each ion velocity the minimal and maximal values of the
# impact parameter are defined. Radius of the shielding of the
# electric field of the ion equals to the value of the maximal
# impact parameter;
# 5) For each impact parameter in the interval from minimal till
# maximal values the transfered momenta deltap_x,y,z are
# calculated;
# 6) The found transferred momenta allow calculation of the transferred
#    energy delta_E =deltap^2/(2*m_e) and its integration over the
# impact parameter; then (expressions (3.4), (3.5) from [1]):
# FF =-2*pi*n_e*integral_rhoMin^rhoMax delta_E*rho*drho;
# 7) For taking into account the velocity distribution of the
# electrons it is necessary to repeat these calculations for
# each value of the electron's velocity and then integrate result
# over distribution of the velocities.
#
# 10/26/2018:
#
# 8) Item 6 is wrong and correct expression for transfered
# energy delta_E will be used;
# 9) Method (my own) Least Squares Method - LSM is used to fit the
# dependence of transferred momenta on impact parameter;
#
#
# 11/08/2018:
#
# 10) Two functions ('fitting' and 'errFitAB' are defined to realize
# my LSM to find the parameters of the fitting end error of this
# fitting;
#
# 11) Analysis of different dependences between values; graphical
#     presentation of these dependences;
#
#########################################################
import os, sys
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
from matplotlib import ticker
from matplotlib import markers
import matplotlib as mpl
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import scipy.integrate as integrate
from scipy.integrate import quad, nquad, dblquad
from scipy.constants import pi
from scipy import optimize
from statistics import mean
from array import array
#
# All physical constants have its dimension in units in the system CI.
# This code uses units in the system CGS!
#
from scipy.constants import speed_of_light as clight
from scipy.constants import epsilon_0 as eps0
from scipy.constants import mu_0 as mu0
from scipy.constants import elementary_charge as qe
from scipy.constants import electron_mass as me
from scipy.constants import proton_mass as mp
from scipy.constants import Boltzmann as kB
pi=3.14159265358
#
# Physical constants (CGS units):
#
m_e=9.10938356e-28 # electron mass, g
m_elec=m_e # to keep variable from previous script
m_p=1.672621898e-24 # proton mass, g
M_ion = m_p # to keep variable from previous script
q_e=4.803204673e-10 # electron charge, CGSE unit: sqrt(g*cm^3/sec^2)
q_elec=q_e # to keep variable from previous script
Z_ion = q_e # to keep variable from previous script
cLight=2.99792458e10 # speed of light, cm/sec
eVtoErg=1.6021766208e-12 # 1 eV = 1.6...e-12 erg
CtoPart=2.99792458e9 # 1 C = 1 A*sec = 2.9...e9 particles
m_e_eV = m_e*cLight**2/eVtoErg # electron rest energy, eV
#
# Electron beam parameters (initial defaults; overridden below for
# HESR and then EIC):
#
Ekin=3.0e4 # kinetic energy, eV
curBeam=0.5 # current density, A/cm^2
dBeam=3.0 # beam diameter, cm
angSpread=3.0 # angular spread, mrad
trnsvT=0.5 # transversal temperature, eV
longT=2.0e-4 # longitudinal temperature, eV (was 2.0e-4)
nField=1 # number ov values of the magnetic field
fieldB=np.zeros(nField) # magnetic field
fieldB[0]=3.e3 # Gs
omega_p=1.0e9 # plasma frequency, 1/sec
n_e=omega_p**2*m_e/(4.*pi*q_e**2) # plasma density, 3.1421e+08 cm-3
n_e1=8.e7 # plasma density, cm-3
omega_p1=np.sqrt(4.*pi*n_e1*q_e**2/m_e) # plasma frequency, 5.0459e+08 1/s
#
# Cooling system parameter:
#
coolLength=150.0 # typical length of the coolong section, cm
#
# HESR (overrides the defaults above):
#
Ekin=90.8e4 # HESR kinetic energy, eV
curBeam=0.5 # HESR current beam, A
dBeam=2.0 # HESR beam diameter, cm
angSpread=0.0 # HESR angular spread, mrad
trnsvT=0.2 # HESR transversal temperature, eV
longT=1.0e-2 # HESR longitudinal temperature, eV (was 2.0e-4)
fieldB[0]=1.e3 # HESR, Gs
coolLength=270.0 # HESR typical length of the coolong section, cm
#
# EIC (final overrides; these are the values actually in effect below):
#
angSpread=0.0 # EIC angular spread, mrad
fieldB[0]=5.e4 # EIC, Gs
coolLength=300.0 # EIC typical length of the coolong section, cm
#
# Calculated parameters of the electron beam:
#
# Relativistic beam velocity from kinetic energy (exact formula).
V0 = cLight*np.sqrt(Ekin/m_e_eV*(Ekin/m_e_eV+2.))/(Ekin/m_e_eV+1.)
print ('V0 =%e' % V0)
tetaV0=0. # angle between V0 and magnetic field, rad
B_mag=fieldB[0]*np.cos(tetaV0) # magnetic field acting on an electron, Gs
rmsTrnsvVe=np.sqrt(2.*trnsvT*eVtoErg/m_e) # RMS transversal velocity, cm/s
rmsLongVe=np.sqrt(2.*longT*eVtoErg/m_e) # RMS longitudinal velocity, cm/s
# HESR:
dens=curBeam*(CtoPart/q_e)/(pi*(.5*dBeam)**2*V0) # density, 1/cm^3
omega=np.sqrt(4.*pi*dens*q_e**2/m_e) # plasma frequency, 1/s
n_e=dens
omega_p=omega
print ('HESR: dens = %e,omega_p = %e' % (dens,omega_p))
# EIC (final values used in the calculations below):
rmsLongVe = 1.0e+7 # cm/s
longT = .5*m_e*rmsLongVe**2/eVtoErg
rmsTrnsvVe = 4.2e+7 # cm/s
trnsvT = .5*m_e*rmsTrnsvVe**2/eVtoErg
print ('EIC: rmsLongVe = %e, longT = %e, rmsTrnsvVe = %e, trnsvT = %e' % \
(rmsLongVe,longT,rmsTrnsvVe,trnsvT))
dens=2.e9 # density, 1/cm^3
omega=np.sqrt(4.*pi*dens*q_e**2/m_e) # plasma frequency, 1/s
n_e=dens
omega_p=omega
print ('EIC: dens = %e,omega_p = %e' % (dens,omega_p))
cyclFreq=q_e*B_mag/(m_e*cLight) # cyclotron frequency, 1/s
rmsRoLarm=rmsTrnsvVe*cyclFreq**(-1) # RMS Larmor radius, cm
dens=omega_p**2*m_e/(4.*pi*q_e**2) # density, 1/cm^3
likeDebyeR=(3./dens)**(1./3.) # "Debye" sphere with 3 electrons, cm
eTempTran=trnsvT # to keep variable from previous script
eTempLong=longT # to keep variable from previous script
coolPassTime=coolLength/V0 # time pass through cooling section, cm
thetaVi=0. # polar angle ion and cooled electron beams, rad
phiVi=0. # azimuth angle ion and cooled electron beams, rad
powV0=round(np.log10(V0))
mantV0=V0/(10**powV0)
pow_n_e=round(np.log10(n_e))
mant_n_e=n_e/(10**pow_n_e)
#
# Formfactor ffForm for friction force:
#
# ffForm = 2*pi*dens*q_e**4/(m_e*V0**2)=
# = 0.5*omega_p**2*q_e**2/V0**2
#
# Dimension of ffForm is force: g*cm/sec**2=erg/cm
#
# 1 MeV/m = 1.e6*eVtoErg/100. g*cm/sec**2 = 1.e4*eVtoErg erg/cm
MeV_mToErg_cm=1.e4*eVtoErg
# ffForm=-.5*omega_p**2*q_e**2/V0**2/MeV_mToErg_cm # MeV/m
eV_mToErg_m=100.*eVtoErg
# ffForm=-.5*omega_p**2*q_e**2/V0**2/eV_mToErg_m # =-6.8226e-12 eV/m
eV_mInErg_cm=100.*eVtoErg
ffForm=-.5*omega_p**2*q_e**2/V0**2/eVtoErg # =-6.8226e-10 eV/cm
ffForm=100.*ffForm # =-6.8226e-08 eV/m
ergToEV = 1./1.60218e-12
#
# Relative velocities of electrons:
#
relVeTrnsv=rmsTrnsvVe/V0
relVeLong=rmsLongVe/V0
print ('V0=%e cm/s, rmsTrnsvVe=%e cm/s (rel = %e), rmsLongVe=%e cm/s (rel = %e)' % \
(V0,rmsTrnsvVe,relVeTrnsv,rmsLongVe,relVeLong))
# Indices into the 6-D phase-space vectors (x,px,y,py,z,pz):
(Ix, Ipx, Iy, Ipy, Iz, Ipz) = range(6)
stepsNumberOnGyro = 25 # number of the steps on each Larmour period
# NOTE: the block below is deliberately disabled (kept inside a
# triple-quoted string); it used to read impact-parameter boundaries
# from 'areaOfImpactParameter_tAC-v6_fig110.data'.
'''
#
# Opening the input file:
#
inputFile='areaOfImpactParameter_tAC-v6_fig110.data'
print ('Open input file "%s"...' % inputFile)
inpfileFlag=0
try:
   inpfile = open(inputFile,'r')
   inpfileFlag=1
except:
   print ('Problem to open input file "%s"' % inputFile)
if inpfileFlag == 1:
   print ('No problem to open input file "%s"' % inputFile)
lines=0 # Number of current line from input file
dataNumber=0 # Number of current value of any types of Data
xAboundary=np.zeros(100)
xBboundary=np.zeros(100)
while True:
   lineData=inpfile.readline()
# print ('line=%d: %s' % (lines,lineData))
   if not lineData:
      break
   lines += 1
   if lines > 4:
      words=lineData.split()
      nWords=len(words)
# print ('Data from %d: words=%s, number of entries = %d' % (lines,words,nWords))
      xAboundary[dataNumber]=float(words[0])
      xBboundary[dataNumber]=float(words[1])
      dataNumber += 1
inpfile.close()
print ('Close input file "%s"' % inputFile)
'''
#====================================================================
#
#------------------ Begin of defined functions -----------------------
#
# Larmor frequency electron:
#
def omega_Larmor(mass,B_mag):
   """Return the Larmor (cyclotron) angular frequency, rad/sec.

   mass [g], B_mag [Gs], q_elec [CGSE]; clight comes from scipy.constants
   in m/sec, so the 1.e+2 factor converts it to cm/sec (CGS).
   """
   numerator = q_elec*B_mag
   denominator = mass*clight*1.e+2
   return numerator/denominator
#
# Derived quantities:
#
omega_L = omega_Larmor(m_elec,B_mag) # rad/sec
T_larm = 2*pi/omega_L # Larmor period, sec
timeStep = T_larm/stepsNumberOnGyro # time step, sec
print ('omega_Larmor= %e rad/sec, T_larm = %e sec, timeStep = %e sec' % \
       (omega_L,T_larm,timeStep))
nLarmorAvrgng=10 # number of averaged Larmor rotations
#
# Data to integrate transferred momenta over the track:
#
timeStep_c=nLarmorAvrgng*stepsNumberOnGyro*timeStep # sec
print ('timeStep_c = %e s' % timeStep_c)
eVrmsTran = np.sqrt(2.*eTempTran*eVtoErg/m_elec) # cm/sec
eVrmsLong = np.sqrt(2.*eTempLong*eVtoErg/m_elec) # cm/sec
kinEnergy = m_elec*(eVrmsTran**2+eVrmsLong**2)/2. # kinetic energy; erg
print ('eVrmsTran = %e cm/sec, eVrmsLong = %e cm/sec, kinEnergy = %e eV' % \
       (eVrmsTran,eVrmsLong,ergToEV*kinEnergy))
ro_larmRMS = eVrmsTran/omega_L # RMS Larmor radius, cm
print ('ro_larmRMS =%e mkm' % (1.e4*ro_larmRMS))
#
# Electrons are magnetized for impact parameter >> rhoCrit:
#
rhoCrit=math.pow(q_elec**2/(m_elec*omega_L**2),1./3) # cm
print ('rhoCrit (mkm) = ' , 1.e+4*rhoCrit)
#
# Conversion from the 6-vector of electron "coordinates" to the 6-vector
# of guiding-center coordinates:
# z_e=(x_e,px_e,y_e,py_e,z_e,pz_e) --> zgc_e=(phi,p_phi,y_gc,p_gc,z_e,pz_e);
#
def toGuidingCenter(z_e):
   """Map an electron 6-vector (x,px,y,py,z,pz) to guiding-center
   coordinates (phi,p_phi,y_gc,p_gc,z,pz); z and pz are untouched."""
   mOmega = m_elec*omega_L                              # g/sec
   pxTilde = z_e[Ipx]+mOmega*z_e[Iy]                    # g*cm/sec
   zgc_e = z_e.copy()                                   # 6-vector
   zgc_e[Ix]  = np.arctan2(pxTilde,z_e[Ipy])            # gyro-phase, radians
   zgc_e[Ipx] = (pxTilde**2+z_e[Ipy]**2)/(2.*mOmega)    # action, g*cm**2/sec
   zgc_e[Iy]  = -z_e[Ipx]/mOmega                        # cm
   zgc_e[Ipy] = z_e[Ipy]+mOmega*z_e[Ix]                 # g/sec
   return zgc_e
#
# Convertion from 6-vector of guiding-center coordinates to 6-vector
# of electron's "coordinates":
# zgc_e=(phi,p_phi,y_gc,p_gc,z_e,pz_e) --> z_e=(x_e,px_e,y_e,py_e,z_e,pz_e);
#
def fromGuidingCenter(zgc_e):
   """Inverse of toGuidingCenter: map (phi,p_phi,y_gc,p_gc,z,pz) back to
   the electron 6-vector (x,px,y,py,z,pz); z and pz are untouched."""
   mOmega = m_elec*omega_L                          # g/sec
   rho_larm = np.sqrt(2.*zgc_e[Ipx]/mOmega)         # Larmor radius, cm
   cosPhase = np.cos(zgc_e[Ix])
   sinPhase = np.sin(zgc_e[Ix])
   z_e = zgc_e.copy()                               # 6-vector
   z_e[Ix]  = zgc_e[Ipy]/mOmega-rho_larm*cosPhase   # cm
   z_e[Ipx] = -mOmega*zgc_e[Iy]                     # g*cm/sec
   z_e[Iy]  = zgc_e[Iy]+rho_larm*sinPhase           # cm
   z_e[Ipy] = mOmega*rho_larm*cosPhase              # g*cm/sec
   return z_e
#
# Matrix to dragg electron through the solenoid with field 'B_mag'
# during time interval 'deltaT':
#
def solenoid_eMatrix(B_mag,deltaT):
   """6x6 linear map dragging an electron through a solenoid with field
   B_mag [Gs] for a time interval deltaT [sec]."""
   omegaLocal = omega_Larmor(m_elec,B_mag)     # rad/sec
   mOmega = m_elec*omegaLocal                  # g/sec
   phi = omegaLocal*deltaT                     # rotation phase, rad
   cosPhi = math.cos(phi)                      # dimensionless
   sinPhi = math.sin(phi)                      # dimensionless
   cosPhi_1 = 2.*math.sin(phi/2.)**2           # = 1-cos(phi), dimensionless
   mtrx = np.identity(6)
   # (y,py) rotation block:
   mtrx[Iy, Iy ] = cosPhi                      # dimensionless
   mtrx[Iy, Ipy] = sinPhi/mOmega               # sec/g
   mtrx[Ipy,Iy ] = -mOmega*sinPhi              # g/sec
   mtrx[Ipy,Ipy] = cosPhi                      # dimensionless
   # coupling into (x,px):
   mtrx[Ix, Ipx] = sinPhi/mOmega               # sec/g
   mtrx[Ix, Iy ] = sinPhi                      # dimensionless
   mtrx[Ix, Ipy] = cosPhi_1/mOmega             # sec/g
   mtrx[Iy, Ipx] = -cosPhi_1/mOmega            # sec/g
   mtrx[Ipy,Ipx] = -sinPhi                     # dimensionless
   # longitudinal drift:
   mtrx[Iz, Ipz] = deltaT/m_elec               # sec/g
   return mtrx
#
# Matrix to dragg particle through the drift during time interval 'deltaT':
#
def drift_Matrix(M_prtcl,deltaT):
   """6x6 free-drift map for a particle of mass M_prtcl [g] over
   deltaT [sec]: each coordinate advances by (p/M)*deltaT."""
   dtOverMass = deltaT/M_prtcl        # sec/g
   mtrx = np.identity(6)
   for coordIdx in (Ix,Iy,Iz):
      mtrx[coordIdx,coordIdx+1] = dtOverMass
   return mtrx
#
# Matrix to dragg electron in the "guiding center" system during time interval 'deltaT':
#
def guidingCenter_Matrix(deltaT):
   """6x6 map for an electron in guiding-center coordinates over
   deltaT [sec]; only the longitudinal coordinate drifts."""
   mtrx = np.identity(6)
   mtrx[Iz,Ipz] = deltaT/m_elec       # sec/g
   return mtrx
#
# Description of the collision during time interval 'deltaT'
# in the system coordinates of "guiding center" of electron
# input - 6-vectors for electron and ion before collision and time step deltaT;
# output - transfered momenta to ion and electron:
#
def guidingCenterCollision(vectrElec_gc,vectrIon,deltaT):
   """Single "classical" ion-electron collision step of length deltaT in
   the guiding-center frame of the electron.

   Input:
      vectrElec_gc - electron 6-vector in guiding-center coordinates
                     (phi, p_phi, y_gc, p_gc, z, pz);
      vectrIon     - ion 6-vector (x,px,y,py,z,pz);
      deltaT       - time step, sec.
   Output:
      dpIon, dpElec - transferred momenta (g*cm/sec), equal and opposite;
      action        - adiabatic action of the electron, g*cm^2/sec;
      b_gc          - effective ion/guiding-center distance, cm.
   """
   dpIon=np.zeros(3)
   dpElec=np.zeros(3)
   mOmegaLarm=m_elec*omega_L # g/sec
   dpFactor_gc=q_elec**2 # g*cm^3/sec^2
   rhoLarm_gc=np.sqrt(2.*vectrElec_gc[1]/mOmegaLarm) # Larmor radius, cm
   sinOmega_gc=math.sin(vectrElec_gc[0])
   cosOmega_gc=math.cos(vectrElec_gc[0])
   x_gc=vectrElec_gc[3]/mOmegaLarm # guiding-center x, cm
   numer=(vectrIon[0]-x_gc)*cosOmega_gc- \
         (vectrIon[2]-vectrElec_gc[2])*sinOmega_gc # cm
   # NOTE: the exponent (3/2) relies on Python 3 true division
   # (under Python 2 it would evaluate to 1).
   denom=((vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
          (vectrIon[4]-vectrElec_gc[4])**2+rhoLarm_gc**2)**(3/2) # cm^3
   action=vectrElec_gc[1]+dpFactor_gc*numer*rhoLarm_gc/(omega_L*denom) # g*cm^2/sec
   b_gc=np.sqrt((vectrIon[0]-x_gc)**2+ \
                (vectrIon[2]-vectrElec_gc[2])**2+ \
                (vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm) # cm
   # Dimensions of dpIon, dpElec are g*cm/sec:
   dpIon[0]=-dpFactor_gc*deltaT*(vectrIon[0]-x_gc)/b_gc**3
   dpIon[1]=-dpFactor_gc*deltaT*(vectrIon[2]-vectrElec_gc[2])/b_gc**3
   dpIon[2]=-dpFactor_gc*deltaT*(vectrIon[4]-vectrElec_gc[4])/b_gc**3
   # Momentum conservation: electron receives the opposite kick.
   dpElec[0]=-dpIon[0]
   dpElec[1]=-dpIon[1]
   dpElec[2]=-dpIon[2]
#   print ('dpIon[0]=%e, dpIon[1]=%e, dpIon[2]=%e' % \
#          (dpIon[0],dpIon[1],dpIon[2]))
   return dpIon,dpElec,action,b_gc
#
# "Magnus expansion" description of the collision during time interval 'deltaT'
# in the system coordinates of "guiding center" of electron
# input - 6-vectors for electron and ion before collision and time step deltaT;
# output - transfered momenta to ion and electron and electron y_gc coordinate
# as well calculated parameters C1,C2,C3,b,D1,D2,q for testing:
#
def MagnusExpansionCollision(vectrElec_gc,vectrIon,deltaT):
   """Single collision step of length deltaT using the "Magnus expansion"
   approach [2], in the guiding-center frame of the electron.

   Input:
      vectrElec_gc - electron 6-vector in guiding-center coordinates;
      vectrIon     - ion 6-vector (x,px,y,py,z,pz);
      deltaT       - time step, sec.
   Output:
      dpIon, dpElec - transferred momenta (g*cm/sec), equal and opposite;
      action        - adiabatic action of the electron, g*cm^2/sec;
      dy_gc         - shift of the electron guiding center, cm;
      C1,C2,C3,b,D1,D2,q - intermediate expansion quantities returned
                           for testing/diagnostics.
   """
#    print ('Ion: x=%e, y=%e, z=%e' % (vectrIon[0],vectrIon[2],vectrIon[4]))
#    print ('Electron: x=%e, y=%e, z=%e' %
#           (vectrElec_gc[0],vectrElec_gc[4],vectrElec_gc[4]))
   dpIon=np.zeros(3)
   dpElec=np.zeros(3)
   mOmegaLarm=m_elec*omega_L # g/sec
   dpFactor_gc=q_elec**2 # g*cm^3/sec^2
   rhoLarm_gc=np.sqrt(2.*vectrElec_gc[1]/mOmegaLarm) # Larmor radius, cm
   sinOmega_gc=math.sin(vectrElec_gc[0])
   cosOmega_gc=math.cos(vectrElec_gc[0])
   x_gc=vectrElec_gc[3]/mOmegaLarm # guiding-center x, cm
   numer=(vectrIon[0]-x_gc)*cosOmega_gc- \
         (vectrIon[2]-vectrElec_gc[2])*sinOmega_gc # cm
   denom=((vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
          (vectrIon[4]-vectrElec_gc[4])**2+rhoLarm_gc**2)**(3./2.) # cm^3
   action=vectrElec_gc[1]+dpFactor_gc*numer*rhoLarm_gc/(omega_L*denom) # g*cm^2/sec
   # Squared distance |r_i - r_gc|^2 written as a quadratic in deltaT:
   # b(t)^2 = C1 + C2*t + C3*t^2.
#   C1=np.sqrt((vectrIon[0]-x_gc)**2+ \
#              (vectrIon[2]-vectrElec_gc[2])**2+ \
#              (vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm) # cm^2
   C1=(vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
      (vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm # cm^2
   C2=2.*((vectrIon[0]-x_gc)*vectrIon[1]/M_ion+ \
          (vectrIon[2]-vectrElec_gc[2])*vectrIon[3]/M_ion+ \
          (vectrIon[4]-vectrElec_gc[4])* \
          (vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)) # cm^2/sec
   C3=(vectrIon[1]/M_ion)**2+(vectrIon[3]/M_ion)**2+ \
      (vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)**2 # cm^2/sec^2
   b=np.sqrt(C1+C2*deltaT+C3*deltaT**2) # cm
   D1=(2.*C3*deltaT+C2)/b-C2/np.sqrt(C1) # cm/sec
   D2=(C2*deltaT+2.*C1)/b-2.*np.sqrt(C1) # cm
   q=4.*C1*C3-C2**2 # discriminant, cm^4/sec^2
   # Dimensions of dpIon, dpElec are g*cm/sec:
   dpIon[0]=-2.*dpFactor_gc/q*((vectrIon[0]-x_gc)*D1-vectrIon[1]/M_ion*D2)
   dpIon[1]=-2.*dpFactor_gc/q*((vectrIon[2]-vectrElec_gc[2])*D1- \
                               vectrIon[3]/M_ion*D2)
   dpIon[2]=-2.*dpFactor_gc/q*((vectrIon[4]-vectrElec_gc[4])*D1- \
                               (vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)*D2)
   # Momentum conservation: electron receives the opposite kick.
   dpElec[0]=-dpIon[0]
   dpElec[1]=-dpIon[1]
   dpElec[2]=-dpIon[2]
   dy_gc=dpIon[0]/mOmegaLarm # guiding-center shift, cm
#    print ('dpIon[0]=%e, dpIon[1]=%e, dpIon[2]=%e' % \
#           (dpIon[0],dpIon[1],dpIon[2]))
   return dpIon,dpElec,action,dy_gc,C1,C2,C3,b,D1,D2,q
#
# Minimized functional (my own Least Squares Method - LSM;
# Python has own routine for LSM - see site
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
# Funcional = {log10(funcY) - [fitB*log10(argX) + fitA]}^2
#
def fitting(nPar1,nPar2,argX,funcY):
   """
   Own Least Squares Method (LSM): fit the power law

      funcY = 10**fitA * argX**fitB

   independently for each of the nPar2 data sets, using the nPar1 points
   of each set, by a straight-line fit in log10-log10 space.

   Input:
      nPar1 - number of points in each data set (rows of argX/funcY);
      nPar2 - number of data sets (columns of argX/funcY);
      argX  - arguments, shape (nPar1,nPar2), positive entries;
      funcY - function values, shape (nPar1,nPar2), positive entries.
   Output:
      fitA, fitB - fitted intercept and slope per data set (length nPar2);
      funcHi2    - sum of squared log10 residuals per data set;
      argXfit, funcYfit - fitted curve sampled at the input arguments,
                          shape (nPar1,nPar2).
   """
   log10argX = np.zeros((nPar1,nPar2))
   log10funcY = np.zeros((nPar1,nPar2))
   # Bug fix: this loop previously iterated over the module-level global
   # 'nVion' instead of the parameter nPar2, silently coupling the
   # function to unrelated state and leaving columns unfilled whenever
   # nPar2 != nVion; it must cover all nPar2 data sets.
   for i in range(nPar2):
      for n in range(nPar1):
         log10argX[n,i] = np.log10(argX[n,i])
         log10funcY[n,i] = np.log10(funcY[n,i])
   sumArgX = np.zeros(nPar2)
   sumArgX2 = np.zeros(nPar2)
   sumFuncY = np.zeros(nPar2)
   sumArgXfuncY= np.zeros(nPar2)
   fitA = np.zeros(nPar2)
   fitB = np.zeros(nPar2)
   for i in range(nPar2):
      for n in range(nPar1):
         sumArgX[i] += log10argX[n,i]
         sumArgX2[i] += log10argX[n,i]**2
         sumFuncY[i] += log10funcY[n,i]
         sumArgXfuncY[i] += log10argX[n,i]*log10funcY[n,i]
      # Closed-form LSM solution for a straight line in log-log space:
      delta = sumArgX[i]**2-nPar1*sumArgX2[i]
      fitA[i] = (sumArgX[i]*sumArgXfuncY[i]-sumArgX2[i]*sumFuncY[i])/delta
      fitB[i] = (sumArgX[i]*sumFuncY[i]-nPar1*sumArgXfuncY[i])/delta
#      print ('fitA(%d) = %e, fitB(%d) = %e' % (i,fitA[i],i,fitB[i]))
   argXfit = np.zeros((nPar1,nPar2))
   funcYfit = np.zeros((nPar1,nPar2))
   funcHi2 = np.zeros(nPar2)
   for i in range(nPar2):
      factorA = math.pow(10.,fitA[i])
      for n in range(nPar1):
         argXfit[n,i] = math.pow(10.,log10argX[n,i])
         funcYfit[n,i] = factorA*math.pow(argXfit[n,i],fitB[i])
         funcHi2[i] += (np.log10(abs(funcY[n,i])) - np.log10(abs(funcYfit[n,i])))**2
   return fitA,fitB,funcHi2,argXfit,funcYfit
#
# +-Errors for fitied parameters fitA and fitB:
#
def errFitAB(nPar1,nPar2,argX,funcY,fitA,fitB,funcHi2,errVar,errType):
   """
   +- errors of the fitted parameters fitA and fitB from fitting().

   The error is found by stepping one parameter (fitA for errVar=1,
   fitB for errVar=2) away from its fitted value until the chi^2 in
   log10 space grows by 1 above funcHi2[i] (up to 2000 steps in each
   direction).  For errType=2 the analytic 1-sigma estimate replaces
   the stepped value, and the negative error is set equal to the
   positive one.

   Input:
      nPar1, nPar2 - points per data set / number of data sets;
      argX, funcY  - data arrays, shape (nPar1,nPar2);
      fitA, fitB, funcHi2 - results of fitting();
      errVar  - 1: error of fitA, 2: error of fitB;
      errType - 2: use the analytic sigma estimate instead of stepping.
   Output:
      posErrFit, negErrFit - positive/negative errors per data set.

   NOTE(review): func1sigma2 divides by (nPar2-3) and fitSigma uses
   nPar2 where the number of points nPar1 would be expected for the
   standard sigma formulas — confirm against the intended statistics.
   """
   log10argX = np.zeros((nPar1,nPar2))
   log10funcY = np.zeros((nPar1,nPar2))
   sumArgX = np.zeros(nPar2)
   sumArgX2 = np.zeros(nPar2)
   posErrFit = np.zeros(nPar2)
   negErrFit = np.zeros(nPar2)
#   return posErrFit,negErrFit
   # Step sizes scale with the mean chi^2 of the fits.
   stepA = 5.e-4*mean(funcHi2)
   stepB = 1.e-4*mean(funcHi2)
#   print ('errFitAB: mean(funcHi2) = %e, stepA = %e, stepB = %e' % (mean(funcHi2),stepA,stepB))
   for i in range(nPar2):
      for n in range(nPar1):
         log10argX[n,i] = np.log10(argX[n,i])
         log10funcY[n,i] = np.log10(funcY[n,i])
         sumArgX[i] += log10argX[n,i]
         sumArgX2[i] += log10argX[n,i]**2
   # Positive-direction search:
   for i in range(nPar2):
      k = 0
      deltaFuncHi2 = 0.
      while (deltaFuncHi2 < 1.):
         k += 1
         if k > 2000:
            print ('Break in errFitAB (Fit funcY: case %d); positive error) for %d' % (errVar,i))
            break
#         print ('i=%d: fitParamtr = %e, funcHi2 = %e' % (i,fitParamtr[i], funcHi2[i]))
         curFitA = fitA[i]
         if (int(errVar) == 1):
            curFitA = fitA[i] + k*stepA
            curFuncHi2 = 0.
         factorA = math.pow(10.,curFitA)
         curFitB = fitB[i]
         if (int(errVar) == 2):
            curFitB = fitB[i] + k*stepB
            curFuncHi2 = 0.
         # Chi^2 of the perturbed fit in log10 space:
         for n in range(nPar1):
            curArgX = math.pow(10.,log10argX[n,i])
            curFuncYfit = factorA*math.pow(curArgX,curFitB)
            curFuncHi2 += (np.log10(abs(curFuncYfit)) - log10funcY[n,i])**2
         deltaFuncHi2 = curFuncHi2 - funcHi2[i]
         if (int(errVar) == 1):
            posErrFit[i] = abs(curFitA - fitA[i])
         else:
            posErrFit[i] = abs(curFitB - fitB[i])
         func1sigma2 = funcHi2[i]/(nPar2-3)
         if (int(errVar) == 1):
            fitSigma = np.sqrt(sumArgX2[i]/(nPar2*sumArgX2[i]-sumArgX[i]**2)*func1sigma2)
         else:
            fitSigma = np.sqrt(nPar2/(nPar2*sumArgX2[i]-sumArgX[i]**2)*func1sigma2)
         if (int(errType) == 2):
            posErrFit[i] = fitSigma
#         if (int(errVar) == 1):
#            print ('i=%d: fitA = %e + %e (%e), funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
#                   (i,fitA[i],posErrFit[i],fitSigma,funcHi2[i],k,curFuncHi2))
#         else:
#            print ('i=%d: fitB = %e + %e (%e), funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
#                   (i,fitB[i],posErrFit[i],fitSigma,funcHi2[i],k,curFuncHi2))
   # Negative-direction search (same procedure, stepping downwards):
   for i in range(nPar2):
      k = 0
      deltaFuncHi2 = 0.
      while (deltaFuncHi2 < 1.):
         k += 1
         if k > 2000:
            print ('Break in errFitAB (Fit funcY: case %d); negative error) for %d' % (errVar,i))
            break
         curFitA = fitA[i]
         if (int(errVar) == 1):
            curFitA = fitA[i] - k*stepA
         factorA = math.pow(10.,curFitA)
         curFitB = fitB[i]
         if (int(errVar) == 2):
            curFitB = fitB[i] - k*stepB
         curFuncHi2 = 0.
         for n in range(nPar1):
            curArgX = math.pow(10.,log10argX[n,i])
            curFuncYfit = factorA*math.pow(curArgX,curFitB)
            curFuncHi2 += (np.log10(abs(curFuncYfit)) - log10funcY[n,i])**2
         deltaFuncHi2 = curFuncHi2 - funcHi2[i]
         if (int(errVar) == 1):
            negErrFit[i] = abs(curFitA - fitA[i])
         else:
            negErrFit[i] = abs(curFitB - fitB[i])
         if (int(errType) == 2):
            negErrFit[i] = posErrFit[i]
#         if (errVar == 1):
#            print ('i=%d: fitA = %e - %e, funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
#                   (i,fitA[i],posErrFit[i],funcHi2[i],k,curFuncHi2))
#         else:
#            print ('i=%d: fitB = %e - %e, funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
#                   (i,fitB[i],negErrFit[i],funcHi2[i],k,curFuncHi2))
   return posErrFit,negErrFit
def fittedGKintegration(xMin,xMax,fitA,fitB):
   """
   Evaluate the fitted power law y(x) = 10**fitA * x**fitB on a 16-point
   Gauss quadrature grid over [xMin,xMax] and accumulate the weighted sum

      yIntegrated = sum_k (xMax-xMin) * w_k * y(x_k) * x_k ,

   which (the usual (b-a)/2 mapping factor being absorbed as a factor 2)
   estimates 2 * integral_{xMin}^{xMax} x*y(x) dx.

   Returns the pair (y, yIntegrated): the array of the 16 node values of
   the fitted function and the accumulated scalar sum.

   Nodes psi_k and weights w_k for the reference interval [-1,1] are from
   "Handbook of Mathematical Science", 5th Edition, CRC Press, Inc, 1978;
   they are mapped onto [xMin,xMax] via x_k = psi_k*(b-a)/2 + (a+b)/2.
   """
   abscissa = np.array([-0.9894009, -0.9445750, -0.8656312, -0.7554044, -0.6178762, \
                        -0.4580168, -0.2816036, -0.0950125, 0.0950125, 0.2816036, \
                         0.4580168, 0.6178762, 0.7554044, 0.8656312, 0.9445750, \
                         0.9894009])
   weight = np.array([ 0.0271525, 0.0622535, 0.0951585, 0.1246290, 0.1495960, \
                       0.1691565, 0.1826034, 0.1894506, 0.1894506, 0.1826034, \
                       0.1691565, 0.1495960, 0.1246290, 0.0951585, 0.0622535, \
                       0.0271525])
   nNodes = 16
   amplitude = math.pow(10.,fitA)   # 10**fitA; constant over the whole grid
   y = np.zeros(nNodes)
   yIntegrated = 0.
   for k in range(nNodes):
      # Map the reference node from [-1,1] onto [xMin,xMax]:
      node = abscissa[k]*(xMax-xMin)/2 + (xMax+xMin)/2.
      y[k] = amplitude*math.pow(node,fitB)
      yIntegrated += (xMax-xMin)*weight[k]*y[k]*node
   return y,yIntegrated
#------------------ End of defined functions -----------------------
#
#====================================================================
# Radius R_e of a sphere that contains 'sphereNe' electrons at density n_e:
sphereNe=3.
R_e=math.pow(sphereNe/n_e,1./3) # cm
print ('R_e (cm)=%e' % R_e)
# Electron Larmor radius from the rms transverse velocity:
ro_Larm = eVrmsTran/omega_L # cm
print ('ro_Larm (cm)=%e' % ro_Larm)
# Minimal impact parameter: twice the Larmor radius:
impctPrmtrMin=2.*ro_Larm
# rhoDependenceFlag = 1 # skip calculation of rho dependence if = 0!
#============ Important flags ===========================
#
# Taking into account the transfer of momenta for both particles
# (for "classical" only):
dpTransferFlag = 1 # no taking into account if = 0!
#
saveFilesFlag = 0 # no saving if = 0!
#
plotFigureFlag = 1 # plot if = 1!
# NOTE(review): all figure guards below test 'plotFigureFlag == 0', so with
# the value 1 set here no figure is produced; the "plot if = 1" wording
# above appears stale -- confirm intended convention.
#
#========================================================
# Logarithmic grid of nVion ion velocities from vIonMin to vIonMax:
nVion=50
Vion=np.zeros(nVion) # absolute ion velocity, cm/s
VionLong=np.zeros(nVion) # longitudinal component, cm/s
VionTrnsv=np.zeros(nVion) # transverse component, cm/s
VionRel=np.zeros(nVion) # velocity relative to V0 (dimensionless)
vIonMin=4.e-3*eVrmsTran
vIonMax=10.*eVrmsTran
vIonMinRel=vIonMin/V0
vIonMaxRel=vIonMax/V0
print ('VionMin=%e (vIonMinRel=%e), vIonMax=%e (vIonMaxRel=%e)' % \
(vIonMin,vIonMinRel,vIonMax,vIonMaxRel))
vIonLogStep=math.log10(vIonMax/vIonMin)/(nVion-1)
# Characteristic radii as functions of the ion velocity:
R_debye=np.zeros(nVion) # Debye-like radius, cm
R_pass=np.zeros(nVion) # pass radius, cm
R_pass_1=np.zeros(nVion) # for longT=0. --> eVrmsLong=0.
impctPrmtrMax=np.zeros(nVion) # maximal impact parameter, cm
impctPrmtrMax_1=np.zeros(nVion) # for longT=0. --> eVrmsLong=0.
for i in range(nVion):
   crrntLogVionRel=math.log10(vIonMinRel)+i*vIonLogStep
   VionRel[i]=math.pow(10.,crrntLogVionRel)
   Vion[i]=VionRel[i]*V0
   VionLong[i]=Vion[i]*np.cos(thetaVi)
   VionTrnsv[i]=Vion[i]*np.sin(thetaVi)
   R_debye[i]=np.sqrt(Vion[i]**2+eVrmsTran**2+eVrmsLong**2)/omega_p
   R_pass[i]=np.sqrt(Vion[i]**2+eVrmsLong**2)*coolPassTime
   R_pass_1[i]=np.sqrt(Vion[i]**2+0.*eVrmsLong**2)*coolPassTime
   # NOTE(review): 'help' shadows the Python builtin; harmless here.
   help=max(R_debye[i],R_e)
   impctPrmtrMax[i]=min(help,R_pass[i])
   impctPrmtrMax_1[i]=min(help,R_pass_1[i])
#-----------------------------------------------------------------
# Checking of correction of the maximal impact parameter on dependence
# of preset number of minimal Larmor turns
#
larmorTurnsMin=[10,20,30,40]
impctPrmtrMaxCrrctd=np.zeros((nVion,4))
impctPrmtrMaxCrrctdRel=np.zeros((nVion,4))
for n in range (4):
   for i in range(nVion):
      impctPrmtrMaxCrrctd[i,n]=impctPrmtrMax[i]* \
      np.sqrt(1.- (pi*larmorTurnsMin[n]*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
      impctPrmtrMaxCrrctdRel[i,n]=impctPrmtrMaxCrrctd[i,n]/impctPrmtrMax[i]
#
# First plotting: corrected-to-uncorrected Rmax ratio for several
# minimal-Larmor-turn presets (figure is produced only when
# plotFigureFlag == 0 -- see NOTE at the flag definition).
#
if (plotFigureFlag == 0):
   fig10 = plt.figure(10)
   plt.semilogx(impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,0],'-r', \
                impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,1],'-b', \
                impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,2],'-g', \
                impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,3],'-m',linewidth=2)
   plt.grid(True)
   # NOTE(review): 'hold=True' is a leftover MATLAB-style no-op assignment.
   hold=True
   plt.xlabel('Maximal Impact parameter $R_{max}$, cm',color='m',fontsize=16)
   plt.ylabel('$R_{max}^{Crrctd}/R_{Max}$',color='m',fontsize=16)
#   plt.xlim([.9*min(impctPrmtrMax),1.1*max(impctPrmtrMax)])
   plt.xlim([1.e-2,1.1*max(impctPrmtrMax)])
   plt.ylim([.986,1.001])
   titleHeader='$R_{max}^{Crrctd}=R_{Max} \cdot [1-(\pi\cdot N_{Larm} \cdot'
   titleHeader += '\Delta_{e||}/(\omega_{Larm} \cdot R_{max})]^{1/2}$'
   plt.title(titleHeader,color='m',fontsize=16)
   plt.legend([('$N_{Larm}=$%2d' % larmorTurnsMin[0]), \
               ('$N_{Larm}=$%2d' % larmorTurnsMin[1]), \
               ('$N_{Larm}=$%2d' % larmorTurnsMin[2]), \
               ('$N_{Larm}=$%2d' % larmorTurnsMin[3])],loc='lower center',fontsize=14)
   if (saveFilesFlag == 1):
      fig10.savefig('picturesCMA/correctedRmax_fig10cma.png')
      print ('File "picturesCMA/correctedRmax_fig10cma.png" is written')
# Common x-range for the velocity-dependent plots below:
xLimit=[.9*VionRel[0],1.1*VionRel[nVion-1]]
#
# Types of collisions: map of Rmin/Rmax vs relative ion velocity.
#
if (plotFigureFlag == 0):
   fig3151=plt.figure (3151)
   plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
              [VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
   plt.grid(True)
   hold=True
   plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
   plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
   titleHeader= \
             'Types of Collisions: $V_{e0}=%4.2f\cdot10^{%2d}$ cm/s, $B=%6.1f$ Gs'
   plt.title(titleHeader % (mantV0,powV0,fieldB[0]),color='m',fontsize=16)
   plt.xlim(xLimit)
   yLimit=[8.e-4,.6]
   plt.ylim(yLimit)
   plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
   plt.text(1.6e-3,5.e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
   plt.text(4.4e-5,.0018,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   plt.text(3.e-4,1.75e-3,'$R_{min}=2\cdot<rho_\perp>$',color='k',fontsize=16)
   plt.text(7.e-4,5.e-2,'$R_{max}$',color='k',fontsize=16)
   plt.text(2.85e-5,3.3e-3,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
   plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
   plt.text(1.e-4,7.e-3,'Magnetized Collisions',color='r',fontsize=20)
   plt.text(1.e-4,10.e-4,'Adiabatic or Fast Collisions',color='r',fontsize=20)
   plt.text(2.25e-5,.275,'Collisions are Screened',color='r',fontsize=20)
   plt.text(1.6e-5,1.e-3,'$ \cong 20\cdot R_{Crit}$',color='k',fontsize=16)
   if (saveFilesFlag == 1):
      fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
      print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
#
# Picture for HESR: same types-of-collisions map with HESR-specific
# annotation placement and the field quoted in Tesla (1.e-4*fieldB[0]).
#
if (plotFigureFlag == 0):
   fig3151=plt.figure (3151)
   plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
              [VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
   plt.grid(True)
   hold=True
   plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
   plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
   titleHeader= \
             'HESR Types of Collisions: $V_{e0}=%3.1f\cdot10^{%2d}$cm/s, $B=%3.1f$T'
   plt.title(titleHeader % (mantV0,powV0,1.e-4*fieldB[0]),color='m',fontsize=16)
   plt.xlim(xLimit)
   yLimit=[8.e-4,.6]
   plt.ylim(yLimit)
   plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
   plt.text(4.4e-4,8.4e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
   plt.text(1.e-4,8.4e-4,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   plt.text(3.7e-6,3.4e-3,'$R_{min}=2\cdot<rho_\perp>$',color='b',fontsize=16)
   plt.text(2.8e-4,.1,'$R_{max}$',color='k',fontsize=16)
   plt.text(1.e-4,1.8e-2,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
   plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
   plt.text(6.8e-5,7.e-3,'Magnetized Collisions',color='r',fontsize=20)
   plt.text(6.8e-5,1.2e-3,'Weak Collisions',color='r',fontsize=20)
   plt.text(2.3e-5,1.95e-3,'Adiabatic or Fast Collisions',color='r',fontsize=20)
   plt.text(2.e-5,.275,'Screened Collisions',color='r',fontsize=20)
   plt.text(3.58e-6,2.05e-3,'$\cong$20$\cdot$$R_{Crit}$',color='k',fontsize=16)
   if (saveFilesFlag == 1):
#      fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
#      print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
      fig3151.savefig('HESRimpctPrmtr_fig3151cma.png')
      print ('File "HESRimpctPrmtr_fig3151cma.png" is written')
#
# Picture for EIC: same map again with EIC-specific limits/annotations.
# NOTE(review): reuses figure number 3151, so this overwrites the HESR
# figure when both branches are enabled -- confirm that is intended.
#
if (plotFigureFlag == 0):
   fig3151=plt.figure (3151)
   plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
              [VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
   plt.grid(True)
   hold=True
   plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
   plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
   titleHeader= \
             'EIC Types of Collisions: $V_{e0}=%3.1f\cdot10^{%2d}$cm/s, $B=%3.1f$T'
   plt.title(titleHeader % (mantV0,powV0,1.e-4*fieldB[0]),color='m',fontsize=16)
   plt.xlim(xLimit)
   yLimit=[5.e-5,.3]
   plt.ylim(yLimit)
   plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
   plt.text(9.e-4,4.e-5,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
   plt.text(1.7e-4,3.e-5,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   plt.text(6.3e-6,1.1e-4,'$R_{min}=2\cdot<rho_\perp>$',color='b',fontsize=16)
   plt.text(1.e-4,2.1e-2,'$R_{max}$',color='k',fontsize=16)
   plt.text(2.57e-5,5.e-3,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
   plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
   plt.text(2.3e-5,1.e-3,'Magnetized Collisions',color='r',fontsize=20)
#   plt.text(6.8e-5,1.2e-3,'Weak Collisions',color='r',fontsize=20)
   plt.text(1.1e-5,5.7e-5,'Weak or Adiabatic or Fast Collisions',color='r',fontsize=16)
   plt.text(2.e-5,.15,'Screened Collisions',color='r',fontsize=20)
   plt.text(2.5e-3,1.7e-4,'$\cong$20$\cdot$$R_{Crit}$',color='k',fontsize=16)
   if (saveFilesFlag == 1):
#      fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
#      print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
      fig3151.savefig('EICimpctPrmtr_fig3151cma.png')
      print ('File "EICimpctPrmtr_fig3151cma.png" is written')
# plt.show()
# sys.exit()
#
# Magnetized collisions: characteristic radii R_debye, R_pass, R_e
# as functions of the relative ion velocity.
#
if (plotFigureFlag == 0):
   fig209=plt.figure (209)
   plt.loglog(VionRel,R_debye,'-r',VionRel,R_pass,'-b', \
              VionRel,R_pass_1,'--b',linewidth=2)
   plt.grid(True)
   hold=True
   plt.plot([VionRel[0],VionRel[nVion-1]],[R_e,R_e],color='m',linewidth=2)
   plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=16)
   plt.ylabel('$R_{Debye}$, $R_{Pass}$, $R_e$, cm',color='m',fontsize=16)
#   titleHeader='Magnetized Collision: $R_{Debye}$, $R_{Pass}$, $R_e$: $V_{e0}=%5.3f\cdot10^{%2d}$cm/s'
#   plt.title(titleHeader % (mantV0,powV0),color='m',fontsize=16)
   plt.title('Magnetized Collisions: $R_{Debye}$, $R_{Pass}$, $R_e$',color='m',fontsize=16)
   plt.xlim(xLimit)
   yLimit=[1.e-3,10.]
   plt.ylim(yLimit)
   plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
   plt.text(1.6e-3,5.5e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
   plt.text(4.4e-5,0.001175,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   plt.text(3.e-5,2.45e-3,'$R_e$',color='k',fontsize=16)
   plt.text(3.e-5,5.e-2,'$R_{Debye}$',color='k',fontsize=16)
   plt.text(3.e-5,1.8e-2,'$R_{Pass}$',color='k',fontsize=16)
   plt.text(4.5e-5,4.8e-3,'$R_{Pass}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
   plt.text(8.3e-5,4.0,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
            color='m',fontsize=16)
   if (saveFilesFlag == 1):
      fig209.savefig('picturesCMA/rDebye_rLikeDebye_rPass_fig209cma.png')
      print ('File "picturesCMA/rDebye_rLikeDebye_rPass_fig209cma.png" is written')
#
# Coulomb logarithm evaluation: L_c = ln(Rmax/Rmin) on the velocity grid.
#
clmbLog = np.zeros(nVion)
for i in range(nVion):
   clmbLog[i] = math.log(impctPrmtrMax[i]/impctPrmtrMin)
#   clmbLog[i] = math.log(impctPrmtrMax_1[i]/impctPrmtrMin)
if (plotFigureFlag == 0):
   fig3155=plt.figure (3155)
   plt.semilogx(VionRel,clmbLog,'-xr',linewidth=2)
   plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
   plt.ylabel('Coulomb Logarithm $L_c$',color='m',fontsize=14)
   plt.title('Coulomb Logarithm: $L_c$ = $ln(R_{max}/R_{min})$',color='m',fontsize=16)
   yLimit=[min(clmbLog)-.1,max(clmbLog)+.1]
   plt.ylim(yLimit)
   plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
   plt.text(1.6e-3,5.,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
   plt.text(3.4e-5,5.,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   if (saveFilesFlag == 1):
      fig3155.savefig('picturesCMA_v7/coulombLogrthm_fig3155cma.png')
      print ('File "picturesCMA_v7/coulombLogrthm_fig3155cma.png" is written')
#
# matrix for electron with .5*timeStep_c:
#
matr_elec_c=guidingCenter_Matrix(.5*timeStep_c)
#
# matrix for ion with mass M_ion and .5*timeStep_c:
#
matr_ion_c=drift_Matrix(M_ion,.5*timeStep_c)

larmorTurns = 10
nImpctPrmtr = 50

# Geometry pre-pass: for each (impact parameter, ion velocity) pair compute
# the half-length of the interaction region and the number of integration
# points per track, to size the diagnostic arrays below.
rhoMin = impctPrmtrMin
rhoMax = np.zeros(nVion)
log10rhoMin = math.log10(rhoMin)
crrntImpctPrmtr = np.zeros(nImpctPrmtr)
halfLintr = np.zeros((nImpctPrmtr,nVion))
pointAlongTrack = np.zeros((nImpctPrmtr,nVion))
totalPoints = 0
for i in range(nVion):
   rhoMax[i] = impctPrmtrMax[i]* \
   np.sqrt(1.- (pi*larmorTurns*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
   # NOTE(review): the corrected rhoMax above is immediately overwritten by
   # the uncorrected value on the next line -- confirm which is intended.
   rhoMax[i] = impctPrmtrMax[i]
#   rhoMax[i] = impctPrmtrMax_1[i]                     # for checking!
#   print ('rhoMax(%d) = %e' % (i,rhoMax[i]))
   log10rhoMax = math.log10(rhoMax[i])
   log10rhoStep = (log10rhoMax-log10rhoMin)/(nImpctPrmtr)
#   print ('Vion(%d) = %e, rhoMax = %e' % (i,Vion[i],rhoMax[i]))
   for n in range(nImpctPrmtr):
      log10rhoCrrnt = log10rhoMin+(n+0.5)*log10rhoStep
      rhoCrrnt = math.pow(10.,log10rhoCrrnt)
#      print ('    rhoCrrnt(%d) = %e' % (n,rhoCrrnt))
      halfLintr[n,i] = np.sqrt(rhoMax[i]**2-rhoCrrnt**2)   # half length of interaction; cm
      timeHalfPath = halfLintr[n,i]/eVrmsLong     # 0.5 time of interaction; sec
      numbLarmor = int(2.*timeHalfPath/T_larm)
      # NOTE(review): pointAlongTrack is a float array, so the int() result
      # is stored back as a float; it is re-cast with int() at use sites.
      pointAlongTrack[n,i] = int(2.*timeHalfPath/timeStep_c)
      totalPoints += pointAlongTrack[n,i]
#      print ('     %d: rhoCrrnt = %e, numbLarmor = %d, pointAlongTrack = %d' % \
#             (n,rhoCrrnt,numbLarmor,pointAlongTrack[n,i]))
# print ('totalPoints = %d' % totalPoints)

totalPoints = int(totalPoints)
nnTotalPoints=np.arange(0,2*totalPoints-1,1)
# Per-step diagnostics (two entries per step: before and after the kick):
arrayA=np.zeros(2*totalPoints)
arrayB=np.zeros(2*totalPoints)
bCrrnt_c = np.zeros(2*totalPoints)
#
# Variables for different testing:
#
b_gc = np.zeros(totalPoints)            # distance ion-electron, "GC" approach
action_gc = np.zeros(totalPoints)       # action, "GC" approach
C1test = np.zeros(totalPoints)
C2test = np.zeros(totalPoints)
C3test = np.zeros(totalPoints)
b_ME = np.zeros(totalPoints)            # distance ion-electron, "ME" approach
D1test = np.zeros(totalPoints)
D2test = np.zeros(totalPoints)
qTest = np.zeros(totalPoints)
action_ME = np.zeros(totalPoints)       # action, "ME" approach
actn_gc_ME_rel = np.zeros(totalPoints)  # relative GC/ME action difference
indxTest = 0

rhoInit = np.zeros((nImpctPrmtr,nVion))
#
# "Classical" approach:
#
deltaPx_c = np.zeros((nImpctPrmtr,nVion))
deltaPy_c = np.zeros((nImpctPrmtr,nVion))
deltaPz_c = np.zeros((nImpctPrmtr,nVion))
ionVx_c = np.zeros((nImpctPrmtr,nVion))
ionVy_c = np.zeros((nImpctPrmtr,nVion))
ionVz_c = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_c = np.zeros((nImpctPrmtr,nVion))
#
# "Magnus Expand" approach:
#
deltaPx_m = np.zeros((nImpctPrmtr,nVion))
deltaPy_m = np.zeros((nImpctPrmtr,nVion))
deltaPz_m = np.zeros((nImpctPrmtr,nVion))
ionVx_m = np.zeros((nImpctPrmtr,nVion))
ionVy_m = np.zeros((nImpctPrmtr,nVion))
ionVz_m = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_m = np.zeros((nImpctPrmtr,nVion))
#
# Comparison of approaches (ratio deltaEnrgIon_c/deltaEnrgIon_m):
#
deltaPx_c_m = np.zeros((nImpctPrmtr,nVion))
deltaPy_c_m = np.zeros((nImpctPrmtr,nVion))
deltaPz_c_m = np.zeros((nImpctPrmtr,nVion))
dEion_c_m = np.zeros((nImpctPrmtr,nVion))
#
# Factor to calculate transferred energy to ion
# (the friction force is defined by this transferred energy):
#
deFactor = 0.5/M_ion                        # 1/g
frctnForce_cSM = np.zeros(nVion)      # integration, using Simpson method
frctnForce_mSM = np.zeros(nVion)      # integration, using Simpson method
numberWrongSign_c=0
numberWrongSign_m=0
posSignDeltaEnrgIon_c=0
negSignDeltaEnrgIon_c=0
posSignDeltaEnrgIon_m=0
negSignDeltaEnrgIon_m=0
timeRun = np.zeros(nVion)
totalTimeRun = 0.
indx = 0
# ----------------- Main simulation ---------------
#
# For every ion velocity Vion[i] and every impact parameter rhoInit[n,i],
# one electron-ion track is integrated with the symplectic split scheme
# (half drift, collision kick, half drift), simultaneously in the
# "Guiding Center" (suffix _c) and "Magnus Expansion" (suffix _m)
# approximations; transferred momenta/energies and diagnostics are stored.
#
for i in range(nVion):
   # Taking into account the correction of the maximal impact parameter
   # on dependence of preset number of minimal Larmor turns:
   rhoMax[i] = impctPrmtrMax[i]* \
   np.sqrt(1.- (pi*larmorTurns*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
   # Without taking into account the correction of the maximal impact parameter
   # on dependence of preset number of minimal Larmor turns:
   # NOTE(review): as in the geometry pre-pass, the corrected value is
   # discarded by the assignment below -- confirm which variant is intended.
   rhoMax[i] = impctPrmtrMax[i]
#   rhoMax[i] = impctPrmtrMax_1[i]                     # for checking!
   log10rhoMax = math.log10(rhoMax[i])
   log10rhoStep = (log10rhoMax-log10rhoMin)/(nImpctPrmtr)
#   print ('Vion(%d) = %e, rhoMax = %e' % (i,Vion[i],rhoMax[i]))
   timeStart=os.times()
   for n in range(nImpctPrmtr):
      log10rhoCrrnt = log10rhoMin+(n+0.5)*log10rhoStep
      rhoCrrnt = math.pow(10.,log10rhoCrrnt)
#      rhoInit[i*nImpctPrmtr+n] = rhoCrrnt
      rhoInit[n,i] = rhoCrrnt
      halfLintr[n,i] = np.sqrt(rhoMax[i]**2-rhoCrrnt**2)   # half length of interaction; cm
      z_ionCrrnt_c = np.zeros(6)      # Zeroing out of vector for ion ("GC"-approach)
      z_elecCrrnt_c = np.zeros(6)     # Zeroing out of vector for electron ("GC"-approach)
      z_ionCrrnt_m = np.zeros(6)      # Zeroing out of vector for ion ("ME"-approach)
      z_elecCrrnt_m = np.zeros(6)     # Zeroing out of vector for electron ("ME"-approach)
# Zeroing out of "guiding center" vector for electron (both approaches):
      z_elecCrrnt_gc_c = np.zeros(6)
      z_elecCrrnt_gc_m = np.zeros(6)
# Current values of transferred momenta
# (second index numerates "Guiding Center", (if 0) and
#  "Magnus Expansion" (if 1) approaches:
      dpCrrnt = np.zeros((3,2))
# Intermediate arrays:
      dpIon_c = np.zeros(3)
      dpIon_m = np.zeros(3)
      dpElec_c = np.zeros(3)
      dpElec_m = np.zeros(3)
# Current initial vector for electron:
      z_elecCrrnt_c[Ix] = rhoCrrnt                 # x, cm
      z_elecCrrnt_c[Iz] = -halfLintr[n,i]          # z, cm
      z_elecCrrnt_c[Ipy] = m_elec*eVrmsTran        # py, g*cm/sec
      z_elecCrrnt_c[Ipz] = m_elec*eVrmsLong        # pz, g*cm/sec
      z_elecCrrnt_m[Ix] = rhoCrrnt                 # x, cm
      z_elecCrrnt_m[Iz] = -halfLintr[n,i]          # z, cm
      z_elecCrrnt_m[Ipy] = m_elec*eVrmsTran        # py, g*cm/sec
      z_elecCrrnt_m[Ipz] = m_elec*eVrmsLong        # pz, g*cm/sec
# Current initial vector for ion velocity for both approaches:
      ionVx_c[n,i] = VionTrnsv[i]*np.cos(phiVi)
      ionVy_c[n,i] = VionTrnsv[i]*np.sin(phiVi)
      ionVz_c[n,i] = VionLong[i]
      ionVx_m[n,i] = VionTrnsv[i]*np.cos(phiVi)
      ionVy_m[n,i] = VionTrnsv[i]*np.sin(phiVi)
      ionVz_m[n,i] = VionLong[i]
# transfer to system of guiding center:
      z_elecCrrnt_gc_c=toGuidingCenter(z_elecCrrnt_c)
      z_elecCrrnt_gc_m=toGuidingCenter(z_elecCrrnt_m)
#
# Main loop along the each track:
#
      for k in range(int(pointAlongTrack[n,i])):
#
# Dragging both particles through first half of the step of the track:
#
         z_elecCrrnt_gc_c = np.dot(matr_elec_c,z_elecCrrnt_gc_c)   # electron
         z_elecCrrnt_gc_m = np.dot(matr_elec_c,z_elecCrrnt_gc_m)   # electron
         z_ionCrrnt_c = np.dot(matr_ion_c,z_ionCrrnt_c)            # ion
         z_ionCrrnt_m = np.dot(matr_ion_c,z_ionCrrnt_m)            # ion
# transfer from system of guiding center:
         z_elecCrrnt_c=fromGuidingCenter(z_elecCrrnt_gc_c)
         z_elecCrrnt_m=fromGuidingCenter(z_elecCrrnt_gc_m)
# Current distance between ion and electron; cm:
         bCrrnt_c[indx]=np.sqrt((z_ionCrrnt_c[0]-z_elecCrrnt_c[0])**2+ \
                                (z_ionCrrnt_c[2]-z_elecCrrnt_c[2])**2+ \
                                (z_ionCrrnt_c[4]-z_elecCrrnt_c[4])**2)
# Current values of parameters A,B:
         arrayA[indx] = math.log10(ro_Larm/bCrrnt_c[indx])
         arrayB[indx] = math.log10((q_elec**2/bCrrnt_c[indx])/kinEnergy)
         indx += 1
#
# Dragging both particles through interaction during this step of track
# (for both approaches):
#
# "Guiding Center":
         dpIon_c,dpElec_c,action,b_gc_c = \
                 guidingCenterCollision(z_elecCrrnt_gc_c,z_ionCrrnt_c,timeStep_c)
# "Magnus Expansion":
         dpIon_m,dpElec_m,actionME,dy_gc_m,C1,C2,C3,b,D1,D2,q = \
                 MagnusExpansionCollision(z_elecCrrnt_gc_m,z_ionCrrnt_m,timeStep_c)
# Save data for testing:
         b_gc[indxTest] = b_gc_c           # "Guiding Center" approach
         action_gc[indxTest] = action      # -"- -"- -"- -"- -"- -"-
         C1test[indxTest] = C1             # "Magnus expansion" approach
         C2test[indxTest] = abs(C2)        # -"- -"- -"- -"- -"- -"-
         C3test[indxTest] = C3             # -"- -"- -"- -"- -"- -"-
         b_ME[indxTest] = b                # -"- -"- -"- -"- -"- -"-
         D1test[indxTest] = D1             # -"- -"- -"- -"- -"- -"-
         D2test[indxTest] = D2             # -"- -"- -"- -"- -"- -"-
         qTest[indxTest] = q               #-"- -"- -"- -"- -"- -"-
         action_ME[indxTest] = actionME    #-"- -"- -"- -"- -"- -"-
         indxTest += 1
         indxTestMax = indxTest
#
# Taking into account transfer of momentum for both particles:
#
         if (dpTransferFlag == 1):
            for ic in range(3):
               z_ionCrrnt_c[2*ic+1] += dpIon_c[ic]
               z_elecCrrnt_c[2*ic+1] += dpElec_c[ic]
               z_ionCrrnt_m[2*ic+1] += dpIon_m[ic]
               z_elecCrrnt_m[2*ic+1] += dpElec_m[ic]
# transfer to system of guiding center:
         z_elecCrrnt_gc_c=toGuidingCenter(z_elecCrrnt_c)
         z_elecCrrnt_gc_m=toGuidingCenter(z_elecCrrnt_m)
# Accumulation of the transferred momenta to ion along the track for both approaches:
         for ic in range(3):
#            if i == 0:
#               print ('dpIon_c[%2d] = %20.14e, dpIon_m[%2d] = %20.14e' % \
#                     (ic,dpIon_c[ic],ic,dpIon_m[ic]))
            dpCrrnt[ic,0] += dpIon_c[ic]          # "Guiding Center", g*cm/sec
            dpCrrnt[ic,1] += dpIon_m[ic]          # "Magnus Expansion", g*cm/sec
#
# Ion's velocity change along the track - both approaches:
#
      ionVx_c[n,i] += dpCrrnt[0,0]/M_ion          # cm/sec
      ionVy_c[n,i] += dpCrrnt[1,0]/M_ion          # cm/sec
      ionVz_c[n,i] += dpCrrnt[2,0]/M_ion          # cm/sec
      ionVx_m[n,i] += dpCrrnt[0,1]/M_ion          # cm/sec
      ionVy_m[n,i] += dpCrrnt[1,1]/M_ion          # cm/sec
      ionVz_m[n,i] += dpCrrnt[2,1]/M_ion          # cm/sec
#
# Dragging both particles through second half of the step of the track:
#
      z_elecCrrnt_gc_c = np.dot(matr_elec_c,z_elecCrrnt_gc_c)     # electron
      z_ionCrrnt_c = np.dot(matr_ion_c,z_ionCrrnt_c)              # ion
      z_elecCrrnt_gc_m = np.dot(matr_elec_c,z_elecCrrnt_gc_m)     # electron
      z_ionCrrnt_m = np.dot(matr_ion_c,z_ionCrrnt_m)              # ion
# transfer from system of guiding center:
      z_elecCrrnt_c=fromGuidingCenter(z_elecCrrnt_gc_c)
      z_elecCrrnt_m=fromGuidingCenter(z_elecCrrnt_gc_m)
# Current distance between ion and electron; cm:
      bCrrnt_c[indx]=np.sqrt((z_ionCrrnt_c[0]-z_elecCrrnt_c[0])**2+ \
                             (z_ionCrrnt_c[2]-z_elecCrrnt_c[2])**2+ \
                             (z_ionCrrnt_c[4]-z_elecCrrnt_c[4])**2)
# Current values of parameters A,B:
      arrayA[indx] = math.log10(ro_Larm/bCrrnt_c[indx])
      arrayB[indx] = math.log10((q_elec**2/bCrrnt_c[indx])/kinEnergy)
      indx += 1
#
# Transferred momenta along the track - "Guiding Center" approach:
#
      deltaPx_c[n,i] = dpCrrnt[0,0]                # dpx, g*cm/sec
#      if deltaPx_c[n,i] <= 0.:
#         print ('deltaPx_c[%2d,%2d] = %e, dpCrrnt[%2d,%2d] = %e' % \
#                (n,i,deltaPx_c[n,i],n,i,dpCrrnt[0,0]))
      deltaPy_c[n,i] = dpCrrnt[1,0]                # dpy, g*cm/sec
#      if deltaPy_c[n,i] <= 0.:
#         print ('deltaPy_c[%2d,%2d] = %e' % (n,i,deltaPy_c[n,i]))
      deltaPz_c[n,i] = dpCrrnt[2,0]                # dpz, g*cm/sec
#      if deltaPz_c[n,i] <= 0.:
#         print ('deltaPz_c[%2d,%2d] = %e' % (n,i,deltaPz_c[n,i]))
# Incorrect value:
#      deltaEnrgIon_c[n,i] = (dpCrrnt[0,0]**2+dpCrrnt[1,0]**2+dpCrrnt[2,0]**2)* \
#                            deFactor/eVtoErg                        # eV
# Correct value:
      crrntDeltaEnrg = (dpCrrnt[0,0]*ionVx_c[n,i]+ \
                        dpCrrnt[1,0]*ionVy_c[n,i]+ \
                        dpCrrnt[2,0]*ionVz_c[n,i])*deFactor/eVtoErg   # eV
      absDeltaEnrgIon_c = abs(crrntDeltaEnrg)
      if (crrntDeltaEnrg != 0.):
         signDeltaEnrgIon_c = crrntDeltaEnrg/abs(crrntDeltaEnrg)
      deltaEnrgIon_c[n,i] = crrntDeltaEnrg
      if (deltaEnrgIon_c[n,i] > 0.):
         posSignDeltaEnrgIon_c += 1
      else:
         negSignDeltaEnrgIon_c += 1
#
# Transferred momenta along the track - "Magnus expansion" approach:
#
      deltaPx_m[n,i] = dpCrrnt[0,1]                # dpx, g*cm/sec
#      if deltaPx_m[n,i] <= 0.:
#         print ('deltaPx_m[%2d,%2d] = %e' % (n,i,deltaPx_m[n,i]))
      deltaPy_m[n,i] = dpCrrnt[1,1]
#      if deltaPy_m[n,i] <= 0.:
#         print ('deltaPy_m[%2d,%2d] = %e' % (n,i,deltaPy_m[n,i]))
      deltaPz_m[n,i] = dpCrrnt[2,1]
#      if deltaPz_m[n,i] <= 0.:
#         print ('deltaPz_m[%2d,%2d] = %e' % (n,i,deltaPz_m[n,i]))
# Incorrect value:
#      deltaEnrgIon_m[n,i] = (dpCrrnt[0,1]**2+dpCrrnt[1,1]**2+dpCrrnt[2,1]**2)* \
#                            deFactor/eVtoErg                        # eV
# Correct value (absolute value):
      crrntDeltaEnrg = (dpCrrnt[0,1]*ionVx_m[n,i]+ \
                        dpCrrnt[1,1]*ionVy_m[n,i]+ \
                        dpCrrnt[2,1]*ionVz_m[n,i])*deFactor/eVtoErg   # eV
      absDeltaEnrgIon_m = abs(crrntDeltaEnrg)
      if (crrntDeltaEnrg != 0.):
         signDeltaEnrgIon_m = crrntDeltaEnrg/abs(crrntDeltaEnrg)
      deltaEnrgIon_m[n,i] = crrntDeltaEnrg
      if (deltaEnrgIon_m[n,i] > 0.):
         posSignDeltaEnrgIon_m += 1
      else:
         negSignDeltaEnrgIon_m += 1
#
# Comparison of the approaches (%):
#
      if (deltaPx_m[n,i] != 0.):
         deltaPx_c_m[n,i] = 100.*(deltaPx_c[n,i]/deltaPx_m[n,i]-1.)
      else:
         print ('Bad value (=0.) of deltaPx_m[%d,%d] = ' % (n,i))
      if (deltaPy_m[n,i] != 0.):
         deltaPy_c_m[n,i] = 100.*(deltaPy_c[n,i]/deltaPy_m[n,i]-1.)
      else:
         print ('Bad value (=0.) of deltaPy_m[%d,%d] = ' % (n,i))
      if (deltaPz_m[n,i] != 0.):
         deltaPz_c_m[n,i] = 100.*(deltaPz_c[n,i]/deltaPz_m[n,i]-1.)
      else:
         print ('Bad value (=0.) of deltaPz_m[%d,%d] = ' % (n,i))
      if (deltaEnrgIon_m[n,i] != 0.):
         dEion_c_m[n,i] = 100.*(deltaEnrgIon_c[n,i]/deltaEnrgIon_m[n,i]-1.)
      else:
         print ('Bad value (=0.) of deltaEnrgIon_m[%d,%d] = ' % (n,i))
#
# Integration using Simpson method:
# NOTE(review): the formula below is the trapezoidal rule applied to
# 2*pi*rho*deltaEnrgIon over rho (not Simpson) -- the label looks stale.
#
      if (n > 0):
         frctnForce_cSM[i] += pi*n_e*100.*(deltaEnrgIon_c[n,i]+deltaEnrgIon_c[n-1,i])* \
                              .5*(rhoInit[n,i]+rhoInit[n-1,i])* \
                              (rhoInit[n,i]-rhoInit[n-1,i])          # eV/m
         frctnForce_mSM[i] += pi*n_e*100.*(deltaEnrgIon_m[n,i]+deltaEnrgIon_m[n-1,i])* \
                              .5*(rhoInit[n,i]+rhoInit[n-1,i])* \
                              (rhoInit[n,i]-rhoInit[n-1,i])          # eV/m
   timeEnd = os.times()
   timeRun[i] = float(timeEnd[0])-float(timeStart[0])  # CPU time , sec
   totalTimeRun += timeRun[i]
   print ('timeRun(%2d) = %6.3f seconds' % (i,timeRun[i]))
print ('Total time (icluding Simpson integration) = %6.3f seconds' % totalTimeRun)

print ('deltaEnrgIon_c: nPos=%d, nNeg=%d; deltaEnrgIon_m: nPos=%d, nNeg=%d' % \
       (posSignDeltaEnrgIon_c,negSignDeltaEnrgIon_c, \
        posSignDeltaEnrgIon_m,negSignDeltaEnrgIon_m))
#
# Output for checking:
#
# print \
# ('n     Px_c          Px_m         Py_c        Py_m         Pz_c         Pz_m         Pz_c_m')
# for i in range(10,11,1):
#    for n in range(nImpctPrmtr):
#       print ('%d: %e %e %e %e %e %e %e' % \
#              (n,deltaPx_c[n,i],deltaPx_m[n,i],deltaPy_c[n,i], \
#               deltaPy_m[n,i],deltaPz_c[n,i],deltaPz_m[n,i],deltaPz_c_m[n,i]))
# print ('n     dEion_c      dEion_m')
# for i in range(10,11,1):
#    for n in range(nImpctPrmtr):
#       print ('%d: %e %e ' % (n,deltaEnrgIon_c[n,i],deltaEnrgIon_m[n,i]))

# print ('indxTestMax = %d' % indxTestMax)

#
# Plotting of the tests: per-step diagnostics saved during the main loop.
# Each figure is gated on plotFigureFlag == 0 (see NOTE at the flag).
#
nn=np.arange(0,indxTestMax-1,1)

#
# C1:
#
if (plotFigureFlag == 0):
   fig2020=plt.figure (2020)
   plt.plot(nn,C1test[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$C1$, $cm^2$',color='m',fontsize=16)
   plt.title('$C1=[x_{gc}^2+y_{gc}^2+z_e^2+2J/(m_e \cdot \Omega_e)]^{0.5}$', \
             color='m',fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2020.savefig('picturesCMA_v7/magnusExpansion_C1_fig2020cma.png')
      print ('File "picturesCMA_v7/magnusExpansion_C1_fig2020cma.png" is written')
#
# C2:
#
if (plotFigureFlag == 0):
   fig2030=plt.figure (2030)
   plt.plot(nn,1.e-5*C2test[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$C2$, $\cdot 10^5$ $cm^2/s$',color='m',fontsize=16)
   plt.title('$C2=2\cdot[V_{ix}\cdot(x_i-x_{gc})+V_{iy}\cdot(y_i-y_{gc})+(V_{iz}-V_{ez})\cdot(z_i-z_e)]$', \
             color='m',fontsize=14)
   plt.xlim([-5000,indxTestMax+5000])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2030.savefig('picturesCMA_v7/magnusExpansion_C2_fig2030cma.png')
      print ('File "picturesCMA_v7/magnusExpansion_C2_fig2030cma.png" is written')
#
# C3:
#
if (plotFigureFlag == 0):
   fig2040=plt.figure (2040)
   plt.plot(nn,1e-11*C3test[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$C3$, $\cdot 10^{11}$ $cm^2/s^2$',color='m',fontsize=16)
   plt.title('$C3=V_{ix}^2+V_{iy}^2+(V_{iz}-V_{ez})^2$',color='m',fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2040.savefig('picturesCMA_v7/magnusExpansion_C3_fig2040cma.png')
      print ('File "picturesCMA_v7/magnusExpansion_C3_fig2040cma.png" is written')
#
# D1:
#
if (plotFigureFlag == 0):
   fig2025=plt.figure (2025)
   plt.plot(nn,1.e-5*D1test[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$10^{-5}\cdot D1$, $cm/s$',color='m',fontsize=16)
   plt.title('$D1=(2C_3\cdot \Delta t+C_2)/b_{ME}$ $-$ $C_2/C_1^{0.5}$',color='m',fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2025.savefig('picturesCMA_v7/magnusExpansion_D1_fig2025cma.png')
      print ('File "picturesCMA_v7/magnusExpansion_D1_fig2025cma.png" is written')
#
# D2:
#
if (plotFigureFlag == 0):
   fig2035=plt.figure (2035)
   plt.plot(nn,1.e4*D2test[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$10^4\cdot D2$, $cm$',color='m',fontsize=16)
   plt.title('$D2=(2C_1+C_2\cdot \Delta t)/b_{ME}$ $-$ $2C_1^{0.5}$',color='m',fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2035.savefig('picturesCMA_v7/magnusExpansion_D2_fig2035cma.png')
      print ('File "picturesCMA_v7/magnusExpansion_D2_fig2035cma.png" is written')
#
# Distance b_ME between particles for "ME" approach:
#
if (plotFigureFlag == 0):
   fig2050=plt.figure (2050)
   plt.plot(nn,b_ME[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$b_{ME}$, $cm$',color='m',fontsize=16)
   plt.title('Distance $b_{ME}$ between Particles for "ME" Approach', color='m',fontsize=16)
   plt.text(3500,.4,'$b_{ME}=[C1+C2\cdot \Delta t +C3 \cdot \Delta t^2]^{0.5}$', \
            color='m',fontsize=16)
   plt.text(33000,.36,('$(\Delta t=%8.2e$ $s)$' % timeStep_c),color='m',fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2050.savefig('picturesCMA_v7/particleDistance_me_fig2050cma.png')
      print ('File "picturesCMA_v7/particleDistance_me_fig2050cma.png" is written')
#
# Distance b_gc between particles for "GC" approach:
#
if (plotFigureFlag == 0):
   fig2055=plt.figure (2055)
   plt.plot(nn,b_gc[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$b_{GC}$, $cm$',color='m',fontsize=16)
   plt.title('Distance $b_{GC}$ between Particles for "GC" Approach', color='m',fontsize=16)
   plt.text(0,.4,'$b_{GC}=[(x_i-x_{gc})^2+(y_i-y_{gc})^2+$',color='m',fontsize=16)
   plt.text(55500,.36,'$+(z_i-z_e)^2+2J/(m_e \cdot \Omega_e)]^{0.5}$', \
            color='m',fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2055.savefig('picturesCMA/particleDistance_gc_fig2055cma.png')
      print ('File "picturesCMA/particleDistance_gc_fig2055cma.png" is written')
#
# Comparison of bCrrnt_c from "Guiding Center" with bTest from
# "Magnus expansion" approaches:
#
bCrrnt_cTest = np.zeros(indxTestMax)
bCrrnt_cTestRel = np.zeros(indxTestMax)
b_gc_ME_rel = np.zeros(indxTestMax)
for k in range(indxTestMax):
   # Average the two per-step samples (before/after the kick) of bCrrnt_c:
   bCrrnt_cTest[k] = .5*(bCrrnt_c[2*k]+bCrrnt_c[2*k+1])
#   bCrrnt_cTestRel[k] = bCrrnt_cTest[k]/b_ME[k]
   b_gc_ME_rel[k] = b_gc[k]/b_ME[k]
   actn_gc_ME_rel[k] = 1.e7*(action_gc[k]/action_ME[k]-1.)
if (plotFigureFlag == 0):
   fig2060=plt.figure (2060)
#   plt.semilogy(nn,bCrrnt_cTest[0:indxTestMax-1],'.r')
   plt.plot(nn,bCrrnt_cTest[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('Test $b_{crrntTest}$, $cm$',color='m',fontsize=16)
   plt.title('Test $b_{crrntTest} = .5 \cdot [b_{crrnt}(k)+b_{crrnt}(k+1)]$',color='m', \
             fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
#   plt.ylim([.9*min(bCrrnt_cTest),1.1*max(bCrrnt_cTest)])
   plt.grid(True)
#
# Ratio b_gc/b_ME (absolute value):
#
if (plotFigureFlag == 0):
   fig2070=plt.figure (2070)
#   plt.semilogy(nn,b_gc_ME_rel[0:indxTestMax-1],'.r')
   plt.plot(nn,b_gc_ME_rel[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$b_{GC}/b_{ME}$',color='m',fontsize=16)
   plt.title('Comparison of Distances $b_{GC}$ and $b_{ME}$ between Particles',color='m',fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
#   plt.ylim([.9*min(b_gc_ME_rel),1.1*max(b_gc_ME_rel)])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2070.savefig('picturesCMA_v7/particleDistanceComprsn_gc_me_fig2070cma.png')
      print ('File "picturesCMA_v7/particleDistanceComprsn_gc_me_fig2070cma.png" is written')
#
# Ratio b_gc/b_ME (relative value):
#
if (plotFigureFlag == 0):
   fig2080=plt.figure (2080)
#   plt.semilogy(nn,actn_gc_ME_rel[0:indxTestMax-1],'.r')
   plt.plot(nn,actn_gc_ME_rel[0:indxTestMax-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$10^7\cdot (J_{GC}/J_{ME}$ $-$ $1)$',color='m',fontsize=16)
   plt.title('Comparison of Actions $J_{GC}$ and $J_{ME}$',color='m',fontsize=16)
   plt.xlim([-5000,indxTestMax+5000])
   plt.ylim([.99*min(actn_gc_ME_rel),1.01*max(actn_gc_ME_rel)])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2080.savefig('picturesCMA_v7/actionComprsn_gc_me_fig2080cma.png')
      print ('File "picturesCMA_v7/actionComprsn_gc_me_fig2080cma.png" is written')
#
# Total length of interaction (1/2 of value):
#
nn=np.arange(0,nVion*nImpctPrmtr,1)
halfLintrTest = np.zeros(nVion*nImpctPrmtr)
for i in range(nVion):
   for n in range(nImpctPrmtr):
      # NOTE(review): halfLintr is filled as halfLintr[n,i] (impact index
      # first) but read here as halfLintr[i,n], and the flat index uses
      # nVion*i+n rather than nImpctPrmtr*i+n; both are only benign because
      # nVion == nImpctPrmtr == 50 -- confirm intended indexing.
      halfLintrTest[nVion*i+n] = halfLintr[i,n]
if (plotFigureFlag == 0):
   fig2090=plt.figure (2090)
   plt.semilogy(nn,halfLintrTest,'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$0.5 \cdot L_{Intrctn}$, $cm$',color='m',fontsize=16)
   plt.title('Total Length of Interaction: $L_{Intrctn}=2 \cdot [R_{max}^2-rho_{Init}^2)]^{0.5}$', \
             color='m',fontsize=16)
   plt.xlim([-100,nVion*nImpctPrmtr+100])
   plt.ylim([.9*min(halfLintrTest),1.1*max(halfLintrTest)])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig2090.savefig('picturesCMA/totalLengthIntrsctn_fig2090cma.png')
      print ('File "picturesCMA/totalLengthIntrsctn_fig2090cma.png" is written')
#===================================================
#
# There is fitting for correct values of deltaEnrgIon_m
#
#===================================================
#
# Fitting for figures with deltaEnrgIon_m (my own Least Squares Method - LSM;
# Python has own routine for LSM - see site
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
#
# Fitted function:
#
# |deltaEnrgIon| = 10^fitA * rho^fitB,
# so that
#
# log10(|deltaEnrgIon|) = fitB*log10(rho) + fitA
#
# So, the dimension of expression (10^fitA * rho^fitB) is the same
# as deltaEnrgIon, i.e. eV
#
# os.times() returns a tuple; element [0] is the process CPU (user) time.
timeStart = os.times()
fitA_dEion = np.zeros(nVion)              # dimensionless
fitB_dEion = np.zeros(nVion)              # dimensionless
rhoInitFit_dEion = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_m_fit = np.zeros((nImpctPrmtr,nVion))
funcHi2_dEion = np.zeros(nVion)
# fitting() returns, per ion velocity: power-law coefficients A and B,
# the chi-squared of the fit, and the fitted (rho, deltaE) samples.
fitA_dEion,fitB_dEion,funcHi2_dEion,rhoInitFit_dEion, deltaEnrgIon_m_fit = \
           fitting(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m)
# errFitAB() estimates asymmetric (+/-) errors of coefficient A (arg 1)
# or B (arg 2); the trailing '2' selects the error-estimation variant.
dPosA_dEion = np.zeros(nVion)
dNegA_dEion = np.zeros(nVion)
dPosA_dEion,dNegA_dEion = \
           errFitAB(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m_fit,fitA_dEion,fitB_dEion,funcHi2_dEion,1,2)
dPosB_dEion = np.zeros(nVion)
dNegB_dEion = np.zeros(nVion)
dPosB_dEion,dNegB_dEion = \
           errFitAB(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m_fit,fitA_dEion,fitB_dEion,funcHi2_dEion,2,2)
# print ('Fitting for deltaEion:')
# for i in range(nVion):
#    print ('i=%2d: fitA_dEion = %e (+%e,-%e), fitB_dEion = %e (+%e,-%e), hi2_1 = %e' % \
#           (i,fitA_dEion[i],dPosA_dEion[i],dNegA_dEion[i], \
#              fitB_dEion[i],dPosB_dEion[i],dNegB_dEion[i],funcHi2_dEion[i]))
#
# Analytical Integration of the fitted dependence 10**A*rho**B.
#
# For this dependence on rho:
#
# Friction force = 10**A*n_e*integral_rhoMin^rhoMax (rho**B*rho)*dRho =
# = 10**A*n_e/(B+2)*[rhoMax**(B+2)-rhoMin**(B+2)] (dimension=eV/cm):
#
frctnForce_AI = np.zeros(nVion)
for i in range(nVion):
   factorA1 = math.pow(10.,fitA_dEion[i])
   factorB1 = 2.+fitB_dEion[i]
   frctnForce_AI[i] = 2.*pi*n_e*100.*factorA1/factorB1* \
                      (math.pow(impctPrmtrMax[i],factorB1)- \
                       math.pow(impctPrmtrMin,factorB1))            # eV/m
timeEnd = os.times()
timeFitting = float(timeEnd[0])-float(timeStart[0])  # CPU time , sec
print ('Time of integration = %6.3f seconds' % timeFitting)
#
# Dependences of transferred energy to ion on ion velocity for
# different initial impact parameters:
#
rhoSlctd = [.004,.02,.06,.1]         # selected impact parameters, cm
nRhoSlctd = len(rhoSlctd)
deltaEnrgIon_dpnd_Vi = np.zeros((nRhoSlctd,nVion))
npStart = np.zeros((nRhoSlctd,), dtype=int)
# npStart[k]: first velocity index whose rho grid reaches rhoSlctd[k];
# slctdFlag freezes the result after the first hit (break leaves only the
# innermost loop).
for k in range(nRhoSlctd):
   slctdFlag = 0
   for i in range(nVion):
      if (slctdFlag == 0):
         for n in range(nImpctPrmtr):
            if (rhoInit[n,i] >= rhoSlctd[k]):
               npStart[k] = i
               slctdFlag = 1
               break
# Evaluate the fitted power law 10**A * rho**B at each selected rho.
for k in range(nRhoSlctd):
   for i in range(npStart[k],nVion,1):
      factorA = math.pow(10.,fitA_dEion[i])
      deltaEnrgIon_dpnd_Vi[k,i] = factorA*math.pow(rhoSlctd[k],fitB_dEion[i])
#      print ('deltaEnrgIon_dpnd_Vi[%d,%d] = %e' %(k,i,deltaEnrgIon_dpnd_Vi[k,i]))
#===================================================
#
# There is fitting of deltaPz_m (these values > 0 always) !!!
#
#===================================================
#
# Fitting for figures with deltaPz_m (my own Least Squares Method - LSM;
# Python has own routine for LSM - see site
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
#
# Fitted function:
#
# deltaPz_m = 10^fitA_pz * rho^fitB_pz,
# so that
#
# log10(deltaPz_m) = fitB_pz*log10(rho) + fitA_pz
#
# So, the dimension of expression (10^fitA_pz * rho^fitB_pz) is the same
# as deltaPz_m, i.e. eV
#
fitA_pz = np.zeros(nVion)              # dimensionless
fitB_pz = np.zeros(nVion)              # dimensionless
rhoInitFit_pz = np.zeros((nImpctPrmtr,nVion))
deltaPz_m_fit = np.zeros((nImpctPrmtr,nVion))
# funcHi2_pz (chi-squared) is created by the return of fitting();
# unlike funcHi2_dEion it is not preallocated here.
fitA_pz,fitB_pz,funcHi2_pz,rhoInitFit_pz, deltaPz_m_fit = \
           fitting(nImpctPrmtr,nVion,rhoInit,deltaPz_m)
dPosA_pz = np.zeros(nVion)
dNegA_pz = np.zeros(nVion)
dPosA_pz,dNegA_pz = \
           errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPz_m_fit,fitA_pz,fitB_pz,funcHi2_pz,1,2)
dPosB_pz = np.zeros(nVion)
dNegB_pz = np.zeros(nVion)
dPosB_pz,dNegB_pz = \
           errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPz_m_fit,fitA_pz,fitB_pz,funcHi2_pz,2,2)
# print ('Fitting for deltaPz_m:')
# for i in range(nVion):
#    print ('i=%2d: fitA_pz = %e (+%e,-%e), fitB_pz = %e (+%e,-%e), hi2_1 = %e' % \
#           (i,fitA_pz[i],dPosA_pz[i],dNegA_pz[i], \
#              fitB_pz[i],dPosB_pz[i],dNegB_pz[i],funcHi2_pz[i]))
# print ('<fitA_pz> = %e +- %e' % (mean(fitA_pz),mean(dNegA_pz)))
# print ('<fitB_pz> = %e +- %e' % (mean(fitB_pz),mean(dNegB_pz)))
#===================================================
#
# There is fitting of deltaPx_m (these values > 0 always) !!!
#
#===================================================
#
# Same power-law fit as for deltaPz_m:
#   log10(deltaPx_m) = fitB_px*log10(rho) + fitA_px
#
rhoInitFit_px = np.zeros((nImpctPrmtr,nVion))
deltaPx_m_fit = np.zeros((nImpctPrmtr,nVion))
# Chi-squared of the fit; the original preallocated a misspelled name
# (funcHi2__px, double underscore) that was never read - normalized here
# to the name actually used below.
funcHi2_px = np.zeros(nVion)
fitA_px = np.zeros(nVion)              # dimensionless
fitB_px = np.zeros(nVion)              # dimensionless
fitA_px,fitB_px,funcHi2_px,rhoInitFit_px, deltaPx_m_fit = \
           fitting(nImpctPrmtr,nVion,rhoInit,deltaPx_m)
dPosA_px = np.zeros(nVion)
dNegA_px = np.zeros(nVion)
dPosA_px,dNegA_px = \
           errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPx_m_fit,fitA_px,fitB_px,funcHi2_px,1,2)
dPosB_px = np.zeros(nVion)
dNegB_px = np.zeros(nVion)
dPosB_px,dNegB_px = \
           errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPx_m_fit,fitA_px,fitB_px,funcHi2_px,2,2)
# print ('Fitting for deltaPx_m:')
# for i in range(nVion):
#    print ('i=%2d: fitA_px = %e (+%e,-%e), fitB_px = %e (+%e,-%e), hi2_1 = %e' % \
#           (i,fitA_px[i],dPosA_px[i],dNegA_px[i], \
#              fitB_px[i],dPosB_px[i],dNegB_px[i],funcHi2_px[i]))
# Plot exponent A of the deltaPz power-law fit vs ion velocity (fig 3000).
# x-limits come from the velocity range; y-limits start from generous
# extremes (sign-dependent) and are tightened to the error bars below.
xLimit = [1.015*np.log10(VionRel[0]),.95*np.log10(VionRel[nVion-1])]
yLimMin = 0.
yLimMax = 10.*min(fitA_pz)
if (min(fitA_pz) > 0):
   yLimMin = 10.*max(fitA_pz)
   yLimMax = 0.
for i in range(nVion):
   if (fitA_pz[i] - dNegA_pz[i]) < yLimMin:
      yLimMin = fitA_pz[i] - dNegA_pz[i]
   if (fitA_pz[i] + dPosA_pz[i]) > yLimMax:
      yLimMax = fitA_pz[i] + dPosA_pz[i]
# print ('Exponent A (pz): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.25,yLimMax+.25]
if (plotFigureFlag == 0):
   fig3000=plt.figure (3000)
   plt.errorbar(np.log10(VionRel),fitA_pz,yerr=[dNegA_pz,dPosA_pz],fmt='-ro', \
                ecolor='b',capsize=5,capthick=1)
   plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
   plt.ylabel('Exponent $A$', color='m',fontsize=14)
   titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
   titleHeader += '$\Delta P_z$ = $10^A\cdot rho^B$'
   plt.title(titleHeader,color='m',fontsize=12)
   plt.text(-3.75,-26.0,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
            color='m',fontsize=16)
   plt.text(-4.0,-28.,('<A>=%7.3f $\pm$ %5.3f' % (mean(fitA_pz),mean(dNegA_pz))), \
            color='r',fontsize=16)
#   plt.text(-3.25,-29.65,('$-$%5.3f' % (mean(dNegA_pz))),color='r',fontsize=12)
#   plt.text(-3.25,-29.15,('$+$%5.3f' % (mean(dPosA_pz))),color='r',fontsize=12)
   plt.xlim(xLimit)
   plt.ylim(yLimit)
   plt.grid(True)
   # Vertical dashed markers: transverse and longitudinal electron
   # velocity spreads relative to V_e0.
   plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
   plt.text(-2.55,-28.25,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
   plt.text(-4.24,-28.25,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   if (saveFilesFlag == 1):
      fig3000.savefig('picturesCMA_v7/fitA_dPz_fig3000cma.png')
      print ('File "picturesCMA_v7/fitA_dPz_fig3000cma.png" is written')
# Plot exponent B of the deltaPz power-law fit vs ion velocity (fig 3010);
# same limit-selection scheme as for exponent A above.
yLimMin = 0.
yLimMax = 10.*min(fitB_pz)
if (min(fitB_pz) > 0):
   yLimMin = 10.*max(fitB_pz)
   yLimMax = 0.
for i in range(nVion):
   if (fitB_pz[i] - dNegB_pz[i]) < yLimMin:
      yLimMin = fitB_pz[i] - dNegB_pz[i]
   if (fitB_pz[i] + dPosB_pz[i]) > yLimMax:
      yLimMax = fitB_pz[i] + dPosB_pz[i]
# print ('Exponent B (pz): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.1,yLimMax+.1]
if (plotFigureFlag == 0):
   fig3010=plt.figure (3010)
   plt.errorbar(np.log10(VionRel),fitB_pz,yerr=[dNegB_pz,dPosB_pz],fmt='-ro', \
                ecolor='b',capsize=5,capthick=1)
   plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
   plt.ylabel('Exponent $B$', color='m',fontsize=14)
   titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
   titleHeader += '$\Delta P_z$ = $10^A\cdot rho^B$'
   plt.title(titleHeader,color='m',fontsize=12)
   plt.text(-3.75,-.87,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
            color='m',fontsize=16)
   plt.text(-3.9,-1.55,('<B>=%6.3f $\pm$ %5.3f' % (mean(fitB_pz),mean(dNegB_pz))), \
            color='r',fontsize=16)
#   plt.text(-2.85,-2.25,('$-$%5.3f' % (mean(dNegB_pz))),color='r',fontsize=12)
#   plt.text(-2.85,-1.75,('$+$%5.3f' % (mean(dPosB_pz))),color='r',fontsize=12)
   plt.xlim(xLimit)
   plt.ylim(yLimit)
   plt.grid(True)
   plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
   plt.text(-2.55,-1.74,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
   plt.text(-4.24,-1.74,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   if (saveFilesFlag == 1):
      fig3010.savefig('picturesCMA_v7/fitB_dPz_fig3010cma.png')
      print ('File "picturesCMA_v7/fitB_dPz_fig3010cma.png" is written')
# Plot exponent A of the deltaPx power-law fit vs ion velocity (fig 3020).
yLimMin = 0.
yLimMax = 10.*min(fitA_px)
if (min(fitA_px) > 0):
   yLimMin = 10.*max(fitA_px)
   yLimMax = 0.
for i in range(nVion):
   if (fitA_px[i] - dNegA_px[i]) < yLimMin:
      yLimMin = fitA_px[i] - dNegA_px[i]
   if (fitA_px[i] + dPosA_px[i]) > yLimMax:
      yLimMax = fitA_px[i] + dPosA_px[i]
# print ('Exponent A (px): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.15,yLimMax+.15]
if (plotFigureFlag == 0):
   fig3020=plt.figure (3020)
   plt.errorbar(np.log10(VionRel),fitA_px,yerr=[dNegA_px,dPosA_px],fmt='-ro', \
                ecolor='b',capsize=5,capthick=1)
   plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
   plt.ylabel('Exponent $A$', color='m',fontsize=14)
   titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
   titleHeader += '$\Delta P_x$ = $10^A\cdot rho^B$'
   plt.title(titleHeader,color='m',fontsize=12)
   plt.text(-3.75,-24.2,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
            color='m',fontsize=16)
   plt.text(-3.9,-24.8,('<A>=%6.3f $\pm$ %5.3f' % (mean(fitA_px),mean(dNegA_px))), \
            color='r',fontsize=16)
   plt.xlim(xLimit)
   plt.ylim(yLimit)
   plt.grid(True)
   plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
   plt.text(-2.55,-25.05,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
   plt.text(-4.24,-25.05,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   if (saveFilesFlag == 1):
      fig3020.savefig('picturesCMA_v7/fitA_dPx_fig3020cma.png')
      print ('File "picturesCMA_v7/fitA_dPx_fig3020cma.png" is written')
# Plot exponent B of the deltaPx power-law fit vs ion velocity (fig 3030).
yLimMin = 0.
yLimMax = 10.*min(fitB_px)
if (min(fitB_px) > 0):
   yLimMin = 10.*max(fitB_px)
   yLimMax = 0.
for i in range(nVion):
   if (fitB_px[i] - dNegB_px[i]) < yLimMin:
      yLimMin = fitB_px[i] - dNegB_px[i]
   if (fitB_px[i] + dPosB_px[i]) > yLimMax:
      yLimMax = fitB_px[i] + dPosB_px[i]
# print ('Exponent B (px): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.05,yLimMax+.05]
if (plotFigureFlag == 0):
   fig3030=plt.figure (3030)
   plt.errorbar(np.log10(VionRel),fitB_px,yerr=[dNegB_px,dPosB_px],fmt='-ro', \
                ecolor='b',capsize=5,capthick=1)
   plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
   plt.ylabel('Exponent $B$', color='m',fontsize=14)
   titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
   titleHeader += '$\Delta P_x$ = $10^A\cdot rho^B$'
   plt.title(titleHeader,color='m',fontsize=12)
   plt.text(-3.75,-.95,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
            color='m',fontsize=16)
   plt.text(-3.9,-1.15,('<B>=%6.3f $\pm$ %5.3f' % (mean(fitB_px),mean(dNegB_px))), \
            color='r',fontsize=16)
   plt.xlim(xLimit)
   plt.ylim(yLimit)
   plt.grid(True)
   plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
   plt.text(-2.55,-1.22,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
   plt.text(-4.24,-1.22,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   if (saveFilesFlag == 1):
      fig3030.savefig('picturesCMA_v7/fitB_dPx_fig3030cma.png')
      # Fixed: the message previously reported "picturesCMA/_v7/...",
      # which is not the directory the figure is actually saved to.
      print ('File "picturesCMA_v7/fitB_dPx_fig3030cma.png" is written')
# plt.show()
# sys.exit()
#
#=======================================================
#
# Main plotting:
#
# Fig 110: scatter map of the per-point parameters A and B.
if (plotFigureFlag == 0):
   fig110=plt.figure (110)
   plt.plot(arrayA,arrayB,'.r')
   plt.xlabel('$A=log_{10}(q_e^2/b/E_{kin})$',color='m',fontsize=16)
   plt.ylabel('$B=log_{10}(R_{Larm}/b)$',color='m',fontsize=16)
   plt.title('Map of Parameters A,B', color='m',fontsize=16)
#   plt.xlim([minA,maxA])
#   plt.ylim([minB,maxB])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig110.savefig('picturesCMA/mapA-B_fig110cma.png')
      print ('File "picturesCMA/mapA-B_fig110cma.png" is written')
# Fig 20: lab-system interparticle distance along the tracks.
if (plotFigureFlag == 0):
   fig20=plt.figure (20)
   plt.plot(nnTotalPoints,bCrrnt_c[0:2*totalPoints-1],'.r')
#   plt.semilogy(nn,bCrrnt_c[0:2*totalPoints-1],'.r')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$b_{Lab.Sys}$, $cm$',color='m',fontsize=16)
   plt.title('Distance $b_{Lab.Sys}$ between Particles in Lab.System', color='m',fontsize=16)
   plt.xlim([-5000,2*totalPoints+5000])
#   plt.xlim([0,2000])
   plt.grid(True)
   if (saveFilesFlag == 1):
      fig20.savefig('picturesCMA/particleDistance_ls_fig20cma.png')
      print ('File "picturesCMA/particleDistance_ls_fig20cma.png" is written')
# Fig 30: A (red) and B (blue) along the tracks.
if (plotFigureFlag == 0):
   fig30=plt.figure (30)
   plt.plot(nnTotalPoints,arrayA[0:2*totalPoints-1],'.r', \
            nnTotalPoints,arrayB[0:2*totalPoints-1],'.b')
   plt.xlabel('Points of Tracks',color='m',fontsize=16)
   plt.ylabel('$A$, $B$',color='m',fontsize=16)
   plt.title('$A=log_{10}(q_e^2/b/E_{kin})$, $B=log_{10}(R_{Larm}/b)$',color='m',fontsize=16)
   plt.xlim([-5000,2*totalPoints+5000])
#   plt.ylim([minB,maxB])
   plt.grid(True)
   plt.legend(['A','B'],loc='lower left',fontsize=14)
   if (saveFilesFlag == 1):
      fig30.savefig('picturesCMA/parametersA-B_fig30cma.png')
      print ('File "picturesCMA/parametersA-B_fig30cma.png" is written')
# Broadcast each relative ion velocity down the impact-parameter axis so
# every (rho, Vion) sample can be scattered against the same x value.
xVionRel = np.zeros((nImpctPrmtr,nVion))
for i in range(nVion):
   for n in range(nImpctPrmtr):
       xVionRel[n,i] = VionRel[i]
# Fig 40 (semilog-x) and fig 45 (log-log): rho_Init subdivisions used for
# the Simpson-method integration, with the velocity-spread markers.
if (plotFigureFlag == 0):
   fig40=plt.figure (40)
   for i in range(nVion):
      plt.semilogx(xVionRel[0:nImpctPrmtr,i],rhoInit[0:nImpctPrmtr,i],'.r')
   plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=16)
   plt.ylabel('$rho_{Init}$, cm',color='m',fontsize=16)
   plt.title('Subdivisions for $rho_{Init}$ for Integration: Simpson Method', \
             color='m',fontsize=16)
   plt.grid(True)
   yLimit=[0.,.405]
   plt.ylim(yLimit)
   plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
   plt.text(1.6e-3,-.026,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
   plt.text(3.9e-5,.05,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   if (saveFilesFlag == 1):
      fig40.savefig('picturesCMA/initialImpactParameter_SM_fig40cma.png')
      print ('File "picturesCMA/initialImpactParameter_SM_fig40cma.png" is written')
if (plotFigureFlag == 0):
   fig45=plt.figure (45)
   for i in range(nVion):
      plt.loglog(xVionRel[0:nImpctPrmtr,i],rhoInit[0:nImpctPrmtr,i],'.r')
   plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=16)
   plt.ylabel('$rho_{Init}$, cm',color='m',fontsize=16)
   plt.title('Subdivisions for $rho_{Init}$ for Integration: Simpson Method', \
             color='m',fontsize=16)
   plt.grid(True)
   yLimit=[1.3e-3,.45]
   plt.ylim(yLimit)
   plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
   plt.text(1.6e-3,.15,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
   plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
   plt.text(3.9e-5,.15,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
   if (saveFilesFlag == 1):
      fig45.savefig('picturesCMA/initialImpactParameter_SM_fig45cma.png')
      print ('File "picturesCMA/initialImpactParameter_SM_fig45cma.png" is written')
'''
#
# Figure compares calculated values of of deltaEnrgIon (their dependences
# on impact parameter for different ion velocities) for two approaches
# (figure numbrFigures[0]+1 is the same and taking into account positive and
# negative values of the deltaEnrgIon_c for guiding center approach):
#
VionCrrnt = V0*VionRel[0]
powVionCrrnt = math.floor(np.log10(VionCrrnt))
mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
plt.figure (50)
plt.loglog(rhoInit[0:nImpctPrmtr-1,0],deltaEnrgIon_c[0:nImpctPrmtr-1,0],'-xr', \
rhoInit[0:nImpctPrmtr-1,0],deltaEnrgIon_m[0:nImpctPrmtr-1,0],'--or', \
linewidth=1)
plt.xlabel('Track Initial Impact Parameter $rho_{Init}$', color='m',fontsize=16)
plt.ylabel('$\Delta E_{ion}$, $erg$', color='m',fontsize=16)
titleHeader = 'Transferred Energy $\Delta E_{ion}$ to Ion for '
titleHeader += ' $V_{ion}=%5.3f\cdot10^{%2d}$ $cm/s$'
plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=16)
plt.xlim([.95*rhoInit[0,0],1.05*rhoInit[nImpctPrmtr-1,0]])
plt.grid(True)
xRhoInitPx_c = np.zeros(nImpctPrmtr*nVion)
xRhoInitPx_m = np.zeros(nImpctPrmtr*nVion)
yDeltaPx_c = np.zeros(nImpctPrmtr*nVion)
yDeltaPx_m = np.zeros(nImpctPrmtr*nVion)
indx_c = 0
indx_m = 0
for n in range(nImpctPrmtr):
if deltaPx_c[n,0] > 0.:
xRhoInitPx_c[indx_c] = rhoInit[n,0]
yDeltaPx_c[indx_c] = deltaPx_c[n,0]
# print ('n_c=%2d: xRhoInitPx_c = %e, yDeltaPx_c = %e' % \
# (indx_c,xRhoInitPx_c[indx_c],yDeltaPx_c[indx_c]))
indx_c += 1
if deltaPx_m[n,0] > 0.:
xRhoInitPx_m[indx_c] = rhoInit[n,0]
yDeltaPx_m[indx_c] = deltaPx_m[n,0]
# print ('n_m=%2d: xRhoInitPx_m = %e, yDeltaPx_m = %e' % \
# (indx_m,xRhoInitPx_m[indx_m],yDeltaPx_m[indx_m]))
indx_m += 1
maxIndx_c = indx_c-1
maxIndx_m = indx_m-1
# print ('maxIndx_c = %d, maxIndx_m = %d' % (maxIndx_c,maxIndx_m))
#
# Figure compares calculated values of of deltaPx (their dependences
# on impact parameter for different ion velocities) for two approaches
# (figure numbrFigures[0]+2 is the same and taking into account positive and
# negative values of the deltaPx_c for guiding center approach):
#
plt.figure (51)
plt.loglog(xRhoInitPx_c[0:maxIndx_c],yDeltaPx_c[0:maxIndx_c],'-xr', \
xRhoInitPx_m[0:maxIndx_m],yDeltaPx_m[0:maxIndx_m],'--or', \
linewidth=1)
plt.xlabel('Track Initial Impact Parameter $rho_{Init}$, $cm$', \
color='m',fontsize=16)
plt.ylabel('$\Delta P_{ix}$, $g\cdot cm/s$', color='m',fontsize=16)
titleHeader = 'Transferred Momenta $\Delta P_{ix}$ to Ion for '
titleHeader += ' $V_{ion}=%5.3f\cdot10^{%2d}$ $cm/s$'
plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=16)
plt.xlim([.95*min(xRhoInitPx_c[0],xRhoInitPx_m[0]), \
1.05*max(xRhoInitPx_c[maxIndx_c],xRhoInitPx_m[maxIndx_m])])
plt.grid(True)
xRhoInitPz_c = np.zeros(nImpctPrmtr*nVion)
xRhoInitPz_m = np.zeros(nImpctPrmtr*nVion)
yDeltaPz_c = np.zeros(nImpctPrmtr*nVion)
yDeltaPz_m = np.zeros(nImpctPrmtr*nVion)
indx_c = 0
indx_m = 0
for n in range(nImpctPrmtr):
if deltaPz_c[n,0] > 0.:
xRhoInitPz_c[indx_c] = rhoInit[n,0]
yDeltaPz_c[indx_c] = deltaPz_c[n,0]
# print ('n_c=%2d: xRhoInitPz_c = %e, yDeltaPz_c = %e' % \
# (indx_c,xRhoInitPz_c[indx_c],yDeltaPz_c[indx_c]))
indx_c += 1
if deltaPz_m[n,0] > 0.:
xRhoInitPz_m[indx_c] = rhoInit[n,0]
yDeltaPz_m[indx_c] = deltaPz_m[n,0]
# print ('n_m=%2d: xRhoInitPz_m = %e, yDeltaPz_m = %e' % \
# (indx_m,xRhoInitPz_m[indx_m],yDeltaPz_m[indx_m]))
indx_m += 1
maxIndx_c = indx_c-1
maxIndx_m = indx_m-1
# print ('maxIndx_c = %d, maxIndx_m = %d' % (maxIndx_c,maxIndx_m))
#
# Figure compares calculated values of of deltaPz (their dependences
# on impact parameter for different ion velocities) for two approaches
# (figure numbrFigures[0]+5):
#
plt.figure (53)
plt.loglog(xRhoInitPz_c[0:maxIndx_c],yDeltaPz_c[0:maxIndx_c],'-xr', \
xRhoInitPz_m[0:maxIndx_m],yDeltaPz_m[0:maxIndx_m],'--or', \
linewidth=1)
plt.xlabel('Track Initial Impact Parameter $rho_{Init}$, $cm$', \
color='m',fontsize=16)
plt.ylabel('$\Delta P_{iz}$, $g\cdot cm/s$', color='m',fontsize=16)
titleHeader = 'Transferred Momenta $\Delta P_{iz}$ to Ion for '
titleHeader += ' $V_{ion}=%5.3f\cdot10^{%2d}$ $cm/s$'
plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=16)
plt.xlim([.95*min(xRhoInitPz_c[0],xRhoInitPz_m[0]), \
1.05*max(xRhoInitPz_c[maxIndx_c],xRhoInitPz_m[maxIndx_m])])
plt.grid(True)
'''
#
# Figures 60,70,80, and 90 compare calculated values of deltaEnrgIon
# (their dependences on impact parameter for first values of ion velocities)
# for two approaches (figure numbrFigures[*]+1 is the same and taking into
# account positive and negative values of the deltaEnrgIon_c for guiding center approach):
#
'''
VionCrrnt = V0*VionRel[1]
powVionCrrnt = math.floor(np.log10(VionCrrnt))
mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
plt.figure (60)
plt.loglog(rhoInit[0:nImpctPrmtr-1,1],deltaEnrgIon_c[0:nImpctPrmtr-1,1],'-xb', \
rhoInit[0:nImpctPrmtr-1,1],deltaEnrgIon_m[0:nImpctPrmtr-1,1],'--ob', \
linewidth=1)
plt.xlabel('Track Initial Impact Parameter $rho_{Init}$', \
color='m',fontsize=16)
plt.ylabel('$\Delta E_{ion}$, $erg$', color='m',fontsize=16)
titleHeader = 'Transferred Energy $\Delta E_{ion}$ to Ion for '
titleHeader += ' $V_{ion}=%5.3f\cdot10^{%2d}$ $cm/s$'
plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=16)
plt.xlim([.95*rhoInit[0,1],1.05*rhoInit[nImpctPrmtr-1,1]])
plt.grid(True)
VionCrrnt = V0*VionRel[2]
powVionCrrnt = math.floor(np.log10(VionCrrnt))
mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
plt.figure (70)
plt.loglog(rhoInit[0:nImpctPrmtr-1,2],deltaEnrgIon_c[0:nImpctPrmtr-1,2],'-xg', \
rhoInit[0:nImpctPrmtr-1,2],deltaEnrgIon_m[0:nImpctPrmtr-1,2],'--og', \
linewidth=1)
plt.xlabel('Track Initial Impact Parameter $rho_{Init}$', \
color='m',fontsize=16)
plt.ylabel('$\Delta E_{ion}$, $erg$', color='m',fontsize=16)
titleHeader = 'Transferred Energy $\Delta E_{ion}$ to Ion for '
titleHeader += ' $V_{ion}=%5.3f\cdot10^{%2d}$ $cm/s$'
plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=16)
plt.xlim([.95*rhoInit[0,2],1.05*rhoInit[nImpctPrmtr-1,2]])
plt.grid(True)
VionCrrnt = V0*VionRel[3]
powVionCrrnt = math.floor(np.log10(VionCrrnt))
mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
plt.figure (80)
plt.loglog(rhoInit[0:nImpctPrmtr-1,3],deltaEnrgIon_c[0:nImpctPrmtr-1,3],'-xk', \
rhoInit[0:nImpctPrmtr-1,3],deltaEnrgIon_m[0:nImpctPrmtr-1,3],'--ok', \
linewidth=1)
plt.xlabel('Track Initial Impact Parameter $rho_{Init}$', \
color='m',fontsize=16)
plt.ylabel('$\Delta E_{ion}$, $erg$', color='m',fontsize=16)
titleHeader = 'Transferred Energy $\Delta E_{ion}$ to Ion for '
titleHeader += ' $V_{ion}=%5.3f\cdot10^{%2d}$ $cm/s$'
plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=16)
plt.xlim([.95*rhoInit[0,3],1.05*rhoInit[nImpctPrmtr-2,3]])
plt.grid(True)
VionCrrnt = V0*VionRel[4]
powVionCrrnt = math.floor(np.log10(VionCrrnt))
mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
plt.figure (90)
plt.loglog(rhoInit[0:nImpctPrmtr-1,4],deltaEnrgIon_c[0:nImpctPrmtr-1,4],'-xm', \
rhoInit[0:nImpctPrmtr-1,4],deltaEnrgIon_m[0:nImpctPrmtr-1,4],'--om', \
linewidth=1)
plt.xlabel('Track Initial Impact Parameter $rho_{Init}$', \
color='m',fontsize=16)
plt.ylabel('$\Delta E_{ion}$, $erg$', color='m',fontsize=16)
titleHeader = 'Transferred Energy $\Delta E_{ion}$ to Ion for '
titleHeader += ' $V_{ion}=%5.3f\cdot10^{%2d}$ $cm/s$'
plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=16)
plt.xlim([.95*rhoInit[0,4],1.05*rhoInit[nImpctPrmtr-1,4]])
plt.grid(True)
'''
#
# Dependences of transferred energy to ion and different momenta on initial
# impact parameter for different ion velocity (calculated and fitted values):
#
indxFigures = [0,9,12,18,19,23,27,29,31,34,39,49]      # velocity indices shown
numbrFigures = [500,600,630,660,700,730,760,800,830,860,900,1000]  # base figure numbers
xPos = [.00218,.0022,.0024,.0027,.0026,.00265,.00265,.00265,.00265,.0028,.0029,.0035]
yPos = [6.4e-9,6.7e-9,6.4e-9,5.9e-9,6.2e-9,5.6e-9,5.8e-9,6.3e-9,5.8e-9,5.9e-9,5.8e-9,4.7e-9]
# NOTE(review): this loop only recomputes VionCrrnt/mant/pow and discards
# them; the same values are recomputed inside each plotting loop below.
if (plotFigureFlag == 0):
   for i in range(12):
      VionCrrnt = V0*VionRel[indxFigures[i]]
      powVionCrrnt = math.floor(np.log10(VionCrrnt))
      mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
#
# Pz:
#
# Split deltaPz into positive and negative branches (per selected
# velocity) so both can be shown on a log-log plot; the n*Pz_* counters
# start at -1 and are incremented before use, so after the loop they hold
# (number of points - 1).
posPz_c = np.zeros((12,nImpctPrmtr))
rhoPosPz_c = np.zeros((12,nImpctPrmtr))
negPz_c = np.zeros((12,nImpctPrmtr))
rhoNegPz_c = np.zeros((12,nImpctPrmtr))
posPz_m = np.zeros((12,nImpctPrmtr))
rhoPosPz_m = np.zeros((12,nImpctPrmtr))
negPz_m = np.zeros((12,nImpctPrmtr))
rhoNegPz_m = np.zeros((12,nImpctPrmtr))
nPosPz_c = array('i',[0]*12)
nNegPz_c = array('i',[0]*12)
nPosPz_m = array('i',[0]*12)
nNegPz_m = array('i',[0]*12)
for i in range(12):
   nPosPz_c[i] = -1
   nNegPz_c[i] = -1
   nPosPz_m[i] = -1
   nNegPz_m[i] = -1
   for k in range(nImpctPrmtr):
      if (deltaPz_c[k,indxFigures[i]] > 0):
         nPosPz_c[i] += 1
         rhoPosPz_c[i,nPosPz_c[i]] = rhoInit[k,indxFigures[i]]
         posPz_c[i,nPosPz_c[i]] = deltaPz_c[k,indxFigures[i]]
      if (deltaPz_c[k,indxFigures[i]] <= 0):
         nNegPz_c[i] += 1
         rhoNegPz_c[i,nNegPz_c[i]] = rhoInit[k,indxFigures[i]]
         negPz_c[i,nNegPz_c[i]] = abs(deltaPz_c[k,indxFigures[i]])
      if (deltaPz_m[k,indxFigures[i]] > 0):
         nPosPz_m[i] += 1
         rhoPosPz_m[i,nPosPz_m[i]] = rhoInit[k,indxFigures[i]]
         posPz_m[i,nPosPz_m[i]] = deltaPz_m[k,indxFigures[i]]
      if (deltaPz_m[k,indxFigures[i]] <= 0):
         nNegPz_m[i] += 1
         rhoNegPz_m[i,nNegPz_m[i]] = rhoInit[k,indxFigures[i]]
         negPz_m[i,nNegPz_m[i]] = abs(deltaPz_m[k,indxFigures[i]])
#   print ('i=%d: nPosPz_c=%d, nNegPz_c=%d, nPosPz_m=%d, nNegPz_m=%d' % \
#          (i,nPosPz_c[i],nNegPz_c[i],nPosPz_m[i],nNegPz_m[i]))
#
# Figures to compare calculated values of deltaPz (their dependences
# on impact parameter for different ion velocities) for two approaches
#
if (plotFigureFlag == 0):
   for i in range(12):
      VionCrrnt = V0*VionRel[indxFigures[i]]
      powVionCrrnt = math.floor(np.log10(VionCrrnt))
      mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
      figCrrnt = plt.figure(numbrFigures[i]+1)
      # Fixed: negative-branch arrays are now sliced with the negative
      # counters (the original sliced them with nPosPz_*, a copy-paste
      # slip that appended preallocated zeros to the plotted data).
      # NOTE(review): the counters hold (count-1), so slices 0:n exclude
      # the last accumulated point - kept as in the original; verify.
      plt.loglog(rhoPosPz_c[i,0:nPosPz_c[i]],posPz_c[i,0:nPosPz_c[i]] ,'xb', \
                 rhoNegPz_c[i,0:nNegPz_c[i]],negPz_c[i,0:nNegPz_c[i]] ,'ob', \
                 rhoPosPz_m[i,0:nPosPz_m[i]],posPz_m[i,0:nPosPz_m[i]] ,'xr', \
                 rhoNegPz_m[i,0:nNegPz_m[i]],negPz_m[i,0:nNegPz_m[i]] ,'or',linewidth=2)
      plt.ylabel('$|\Delta P_z|$, $eV$', color='m',fontsize=14)
      plt.legend(['$\Delta P_z > 0$ (CG)','$\Delta P_z < 0$ (CG)', \
                  '$\Delta P_z > 0$ (ME)','$\Delta P_z < 0$ (ME)'], \
                 loc='lower left',fontsize=10)
      plt.xlabel('Initial Impact Parameter $rho_{Init}$, $cm$',color='m',fontsize=14)
      titleHeader = 'Transferred Momenta $\Delta P_z$ to Single Ion:'
      titleHeader += ' $V_{ion}=%3.1f\cdot10^{%2d}$ $cm/s$'
      plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=14)
      plt.xlim([.95*rhoInit[0,indxFigures[i]],1.05*rhoInit[nImpctPrmtr-1,indxFigures[i]]])
#      plt.text(xPos[i],yPos[i],'Fitted $\Delta E_{ion}$ are proportional to $rho_{Init}^{-B}$', \
#               color='m',fontsize=16)
      plt.grid(True)
      if (saveFilesFlag == 1):
         # Fixed: include the actual figure number (+1) in the file name
         # so these files do not collide with the deltaPx comparison
         # figures saved under the same base number.
         fileName = 'picturesCMA/deltaEtransf_indxPlot-'+str(indxFigures[i])+'_fig'
         fileName += str(numbrFigures[i]+1)+'cma.png'
         figCrrnt.savefig(fileName)
         print ('File "',fileName,'" is written')
#
# Px:
#
# Split deltaPx into positive and negative branches, mirroring the Pz
# bookkeeping above; counters start at -1 and are incremented before use.
posPx_c = np.zeros((12,nImpctPrmtr))
rhoPosPx_c = np.zeros((12,nImpctPrmtr))
negPx_c = np.zeros((12,nImpctPrmtr))
rhoNegPx_c = np.zeros((12,nImpctPrmtr))
posPx_m = np.zeros((12,nImpctPrmtr))
rhoPosPx_m = np.zeros((12,nImpctPrmtr))
negPx_m = np.zeros((12,nImpctPrmtr))
rhoNegPx_m = np.zeros((12,nImpctPrmtr))
nPosPx_c = array('i',[0]*12)
nNegPx_c = array('i',[0]*12)
nPosPx_m = array('i',[0]*12)
nNegPx_m = array('i',[0]*12)
for i in range(12):
   nPosPx_c[i] = -1
   nNegPx_c[i] = -1
   nPosPx_m[i] = -1
   nNegPx_m[i] = -1
   for k in range(nImpctPrmtr):
      if (deltaPx_c[k,indxFigures[i]] > 0):
         nPosPx_c[i] += 1
         rhoPosPx_c[i,nPosPx_c[i]] = rhoInit[k,indxFigures[i]]
         posPx_c[i,nPosPx_c[i]] = deltaPx_c[k,indxFigures[i]]
      if (deltaPx_c[k,indxFigures[i]] <= 0):
         nNegPx_c[i] += 1
         # Fixed NameError: the original indexed these two assignments
         # with the undefined name 'nRegPx_c' instead of 'nNegPx_c',
         # which crashes as soon as a non-positive deltaPx_c occurs.
         rhoNegPx_c[i,nNegPx_c[i]] = rhoInit[k,indxFigures[i]]
         negPx_c[i,nNegPx_c[i]] = abs(deltaPx_c[k,indxFigures[i]])
      if (deltaPx_m[k,indxFigures[i]] > 0):
         nPosPx_m[i] += 1
         rhoPosPx_m[i,nPosPx_m[i]] = rhoInit[k,indxFigures[i]]
         posPx_m[i,nPosPx_m[i]] = deltaPx_m[k,indxFigures[i]]
      if (deltaPx_m[k,indxFigures[i]] <= 0):
         nNegPx_m[i] += 1
         rhoNegPx_m[i,nNegPx_m[i]] = rhoInit[k,indxFigures[i]]
         negPx_m[i,nNegPx_m[i]] = abs(deltaPx_m[k,indxFigures[i]])
#   print ('nPosPx_c=%d, nNegPx_c=%d, nPosPx_m=%d, nNegPx_m=%d' % \
#          (nPosPx_c,nNegPx_c,nPosPx_m,nNegPx_m))
#
# Comparison of calculated values of deltaPx (their dependences
# on impact parameter for different ion velocities) for two approaches:
#
if (plotFigureFlag == 0):
   for i in range(12):
      VionCrrnt = V0*VionRel[indxFigures[i]]
      powVionCrrnt = math.floor(np.log10(VionCrrnt))
      mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
      figCrrnt = plt.figure(numbrFigures[i]+2)
      # CG curve scaled by .99 and ME by 1.01 so the overlapping series
      # stay visually distinguishable.
      plt.loglog(rhoPosPx_c[i,0:nPosPx_c[i]],.99*posPx_c[i,0:nPosPx_c[i]] ,'xb', \
                 rhoPosPx_m[i,0:nPosPx_m[i]],1.01*posPx_m[i,0:nPosPx_m[i]] ,'xr',linewidth=2)
#      plt.loglog(rhoPosPx_c[i,0:nPosPx_c[i]],.99*posPx_c[i,0:nPosPx_c[i]] ,'xb', \
#                 rhoNegPx_c[i,0:nNegPx_c[i]],.99*negPx_c[i,0:nNegPx_c[i]] ,'ob', \
#                 rhoPosPx_m[i,0:nPosPx_m[i]],1.01*posPx_m[i,0:nPosPx_m[i]] ,'xr', \
#                 rhoNegPx_m[i,0:nNegPx_m[i]],1.01*negPx_m[i,0:nNegPx_m[i]] ,'or',linewidth=2)
      plt.ylabel('$\Delta P_x$, $eV$', color='m',fontsize=14)
#      plt.ylabel('$|\Delta P_x|$, $eV$', color='m',fontsize=14)
      plt.legend(['$0.99\cdot\Delta P_x$: CG - Center Guide', \
                  '$1.01\cdot\Delta P_x$: ME - Magnus Expansion'],loc='lower left',fontsize=10)
#      plt.legend(['$\Delta P_x > 0$ (CG)','$\Delta P_x < 0$ (CG)', \
#                  '$\Delta P_x > 0$ (ME)','$\Delta P_x < 0$ (ME)'], \
#                 loc='lower left',fontsize=11)
      plt.xlabel('Initial Impact Parameter $rho_{Init}$, $cm$',color='m',fontsize=14)
      titleHeader = 'Transferred Momenta $\Delta P_x$ to Single Ion:'
      titleHeader += ' $V_{ion}=%3.1f\cdot10^{%2d}$ $cm/s$'
      plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=14)
      plt.xlim([.95*rhoInit[0,indxFigures[i]],1.05*rhoInit[nImpctPrmtr-1,indxFigures[i]]])
#      plt.text(xPos[i],yPos[i],'Fitted $\Delta E_{ion}$ are proportional to $rho_{Init}^{-B}$', \
#               color='m',fontsize=16)
      plt.grid(True)
      if (saveFilesFlag == 1):
         # Fixed: include the actual figure number (+2) so these files do
         # not overwrite the deltaPz comparison figures, which the
         # original saved under the identical file name.
         fileName = 'picturesCMA/deltaEtransf_indxPlot-'+str(indxFigures[i])+'_fig'
         fileName += str(numbrFigures[i]+2)+'cma.png'
         figCrrnt.savefig(fileName)
         print ('File "',fileName,'" is written')
timeEnd = os.times()
# NOTE(review): timeStart was last set before the fitting section, so this
# interval also includes the fitting work - verify that is intended.
timeIntgrtn = float(timeEnd[0])-float(timeStart[0])  # CPU time , sec
print ('Time of plotting = %6.3f seconds' % timeIntgrtn)
# Per-figure y positions for optional annotations (used by later plots).
yPosText = [-2.12,-2.12,-2.12,-2.20,-2.12,-2.12,-2.12,-2.20,-2.12,-2.12,-2.12,-2.12]
#
# Dependence of calculated and fitted values of deltaPz on impact parameter
# for different ion velocities:
#
# Data are scaled by 1e24 so the axis labels stay readable.
if (plotFigureFlag == 0):
   for i in range(12):
      VionCrrnt = V0*VionRel[indxFigures[i]]
      powVionCrrnt = math.floor(np.log10(VionCrrnt))
      mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
      figCrrnt = plt.figure(numbrFigures[i]+5)
      plt.loglog(rhoInit[0:nImpctPrmtr,indxFigures[i]], \
                 1.e24*deltaPz_m[0:nImpctPrmtr,indxFigures[i]],'xr', \
                 rhoInitFit_pz[0:nImpctPrmtr,indxFigures[i]], \
                 1.e24*deltaPz_m_fit[0:nImpctPrmtr,indxFigures[i]],'ob',linewidth=2)
      plt.xlabel('Initial Impact Parameter $rho_{Init}$, $cm$',color='m',fontsize=14)
      plt.ylabel('$10^{24} \cdot \Delta P_z$, $eV$', color='m',fontsize=14)
      titleHeader = 'Transferred Momenta $\Delta P_z$ to Single Ion:'
      titleHeader += ' $V_{ion}=%3.1f\cdot10^{%2d}$ $cm/s$'
      plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=14)
      plt.xlim([.95*rhoInit[0,indxFigures[i]],1.05*rhoInit[nImpctPrmtr-1,indxFigures[i]]])
      plt.ylim([ .9e24*deltaPz_m[nImpctPrmtr-1,indxFigures[i]], \
                1.1e24*deltaPz_m_fit[0,indxFigures[i]]])
      plt.legend(['Calculated Data', \
                  ('Fitting: $\Delta P_z=10^A\cdot$rho$_{init}^B$; B = %5.3f $\pm$ %5.3f' % \
                   (fitB_pz[indxFigures[i]],dNegB_pz[indxFigures[i]]))],loc='lower left',fontsize=11)
#      plt.text(xPos[i],yPos[i],'Fitted $\Delta P_z$ are proportional to $rho_{Init}^{-B}$', \
#               color='m',fontsize=16)
      plt.grid(True)
      if (saveFilesFlag == 1):
         fileName = 'picturesCMA_v7/dPz_withFit_indxPlot'+str(indxFigures[i])+'_fig'
         fileName += str(numbrFigures[i]+5)+'cma.png'
         figCrrnt.savefig(fileName)
         print ('File "',fileName,'" is written')
#
# Dependence of calculated and fitted values of deltaPx on impact parameter
# for different ion velocities:
#
if (plotFigureFlag == 0):
   for i in range(12):
      VionCrrnt = V0*VionRel[indxFigures[i]]
      powVionCrrnt = math.floor(np.log10(VionCrrnt))
      mantVionCrrnt = VionCrrnt/(10**powVionCrrnt)
      figCrrnt = plt.figure(numbrFigures[i]+7)
      plt.loglog(rhoInit[0:nImpctPrmtr,indxFigures[i]], \
                 deltaPx_m[0:nImpctPrmtr,indxFigures[i]],'xr', \
                 rhoInitFit_px[0:nImpctPrmtr,indxFigures[i]], \
                 deltaPx_m_fit[0:nImpctPrmtr,indxFigures[i]],'ob',linewidth=2)
      plt.xlabel('Initial Impact Parameter $rho_{Init}$, $cm$',color='m',fontsize=14)
      plt.ylabel('$\Delta P_x$, $eV$', color='m',fontsize=14)
      titleHeader = 'Transferred Momenta $\Delta P_x$ to Single Ion:'
      titleHeader += ' $V_{ion}=%3.1f\cdot10^{%2d}$ $cm/s$'
      plt.title(titleHeader % (mantVionCrrnt,powVionCrrnt),color='m',fontsize=14)
      plt.xlim([.95*rhoInit[0,indxFigures[i]],1.05*rhoInit[nImpctPrmtr-1,indxFigures[i]]])
      plt.ylim([ .9*deltaPx_m[nImpctPrmtr-1,indxFigures[i]], \
                1.1*deltaPx_m_fit[0,indxFigures[i]]])
      plt.legend(['Calculated Data', \
                  ('Fitting: $\Delta P_x=10^A\cdot rho_{init}^B$; B = %5.3f $\pm$ %5.3f' % \
                   (fitB_px[indxFigures[i]],dNegB_px[indxFigures[i]]))],loc='lower left',fontsize=11)
#      plt.text(xPos[i],yPos[i],'Fitted $\Delta P_x$ are proportional to $rho_{Init}^{-B}$', \
#               color='m',fontsize=16)
      plt.grid(True)
      if (saveFilesFlag == 1):
         fileName = 'picturesCMA_v7/dPx_withFit_indxPlot-'+str(indxFigures[i])+'_fig'
         fileName += str(numbrFigures[i]+7)+'cma.png'
         figCrrnt.savefig(fileName)
         print ('File "',fileName,'" is written')
#
# Dependence of calculated and fitted values of deltaEnrgIon
# on impact parameter for different ion velocities:
#
if (plotFigureFlag == 0):
for i in range(12):
VionCrrnt = V0*VionRel[indxFigures[i]]
powVionCrrnt = math.floor( | np.log10(VionCrrnt) | numpy.log10 |
#!/usr/bin/env python
import rospy
import numpy as np
import sys
import os
import tf
from dbw_mkz_msgs.msg import SteeringReport
from sensor_msgs.msg import Image
from derived_object_msgs.msg import ObjectWithCovarianceArray
from std_msgs.msg import String
from sensor_msgs.msg import Imu
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Twist
from geometry_msgs.msg import TwistStamped
from nav_msgs.msg import Odometry
from std_msgs.msg import Header
from darknet_ros_msgs.msg import BoundingBoxes
from darknet_ros_msgs.msg import ObjectCount
from nuscenes2bag.msg import RadarObjects
from utils import Mat_buildROS
from utils import Mat_extractROS
from flir_adk_multi.msg import trackArrayRdr
from flir_adk_multi.msg import trackRdr
from flir_adk_multi.msg import trackArrayCam
from flir_adk_multi.msg import trackCam
from sensor_msgs.msg import PointCloud2
from dbw_mkz_msgs.msg import WheelSpeedReport
from utils import CamObj
from utils import RadarObj
from utils import RadarObjMKZ
from std_msgs.msg import Float32MultiArray
from std_msgs.msg import MultiArrayDimension
from cv_bridge import CvBridge, CvBridgeError
import cv2
from itertools import permutations
import time
def main():
    """Entry point: start the 'jpda' ROS node and run the fusion pipeline.

    Expects three positional CLI arguments:
      1. dataset type ("NuSc", "MKZ", or "matlab")
      2. data-association method ("JPDA" or "Hungarian")
      3. plot mode: 0-No Plot; 1-Combined; 2-Cam; 3-Rdr; 4-Both Cam&Rdr
    """
    rospy.init_node('jpda', anonymous=True)
    dataset_type = sys.argv[1]
    assoc_method = sys.argv[2]
    plot_mode = sys.argv[3]
    # Keep a reference to the fusion instance while the node spins.
    fusion = jpda_class(dataset_type, assoc_method, plot_mode)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
class jpda_class():
    def __init__(self,DataSetType,Method,PlotArg):
        """Set up publishers, tuning parameters, and dataset-specific subscribers.

        Parameters
        ----------
        DataSetType : str
            "NuSc", "MKZ", or "matlab"; selects topics and camera FOV.
        Method : str
            Data-association method ("JPDA" or "Hungarian").
        PlotArg : str
            Plot mode flag: '0' none, '1' combined, '2' cam, '3' radar, '4' both.

        Note: for "MKZ"/"matlab" this constructor never returns -- it runs the
        sensor poll/fuse loop until rospy shutdown.
        """
        # Publishers for associated tracks and the annotated debug image.
        self.TrackPubRdr=rospy.Publisher("dataAssocRdr",trackArrayRdr, queue_size=2)
        self.TrackPubCam=rospy.Publisher("dataAssocCam",trackArrayCam, queue_size=2)
        self.image_pub=rospy.Publisher("fusedImage",Image, queue_size=2)
        # MOT-style evaluation output file (hard-coded path).
        filePathPrefix=str("/home/vamsi/Tracking/py-motmetrics/motmetrics/res_dir/")
        self.DestF=open((filePathPrefix+'seq1'+'.txt'),"w")
        # self.YoloClassList=[0,1,2,3,5,7] # For NuSc
        # self.YoloClassList=[0,1,2] # For Yolov3_flir
        self.YoloClassList=[2] # For Matlab Sim Data
        # Gating / track-initiation thresholds.
        self.GateThreshRdr =1# Scaling factor, threshold for gating
        self.GateThreshCam=10# TODO: adjust?
        self.trackInitRdrThresh=0.5 # For track initiation (meters, RMS)
        self.trackInitCamThresh=20 # pixel radius for camera track initiation
        self.CombGateThresh=10# in pixels (added to radius buffer)
        self.bridge=CvBridge()
        self.font=cv2.FONT_HERSHEY_SIMPLEX
        # Kalman-filter noise covariances (Q process, R measurement) per sensor.
        self.Q_rdr=np.array([[10,0,0,0],[0,10,0,0],[0,0,5,0],[0,0,0,1]])
        self.R_rdr=np.array([[3,0,0],[0,3,0],[0,0,3]])
        # self.Q_cam=np.diag([10,10,15,15,10,10,15,15])
        self.Q_cam=np.diag([5,5,10,10,10,10,20,20])
        self.R_cam=np.array([[5,0,0,0],[0,5,0,0],[0,0,5,0],[0,0,0,5]])
        # Camera measurement matrix: only positions and width/height are observed.
        self.CamMsrtmMatrixH=np.array([[1,0,0,0,0,0,0,0],[0,1,0,0,0,0,0,0],\
            [0,0,1,0,0,0,0,0],[0,0,0,1,0,0,0,0]]) # Only positions and w/h are measured)
        # Ego-vehicle state (filled by the odometry callbacks).
        self.Vt=0.0
        self.velY=0.0
        self.velX=0.0
        self.psi=0.0
        self.psiD=0.0# psiDot
        self.Method=Method
        self.PlotArg=PlotArg
        # Radar-to-camera extrinsic offsets (manually measured).
        self.HorzOffset=0# For translation from radar to cam coordinates, manual offset
        self.CamXOffset=2.36#=93 inches, measured b/w cam and Rdr, in x direction
        self.CamZoffset=1 # Roughly 40 inches
        self.imageTime=Header()
        self.BBoxStore=BoundingBoxes()
        #Params for writing tracks to TXT:
        self.delta_x = 0
        self.delta_y = 0 # Assuming that the radar and camera are on same centerline
        self.delta_z = 1.0414/2
        self.H_FOV=190
        self.V_FOV=41 #Calculated based on aspect ratio
        self.HorzOffsetTXT=0 # Manual horizontal (Y-direction) offset for radar in pixels
        self.VertOffsetTXT=-30 # Manual vertical (Z-direction) offset for radar in pixels
        self.ImageExists=0
        self.BBheight=90
        self.BBWidth=90 # For now, static
        self.FrameInit=1
        self.UseCamTracksOnly=1 #1 if using only camera tracks, 0 if using combined tracks for eval
        # Dataset-specific wiring of topics and the main processing loop.
        if DataSetType=="NuSc":
            rospy.Subscriber('/cam_front/raw', Image, self.buildImage)
            rospy.Subscriber('/vel', Twist, self.Odom1NuSc)
            rospy.Subscriber('/odom', Odometry, self.Odom2NuSc)
            rospy.Subscriber('/imu', Imu, self.Odom3NuSc)
            rospy.Subscriber('/radar_front', BoundingBoxes, self.RdrMsrmtsNuSc)
        elif DataSetType=="MKZ":
            self.CamFOV=190.0
            rospy.Subscriber('/Thermal_Panorama', Image, self.buildImage)
            rospy.Subscriber('/imu/data', Imu, self.Odom2MKZ) # TODO: fix after IMU is available
            rospy.Subscriber('/vehicle/twist', TwistStamped,self.Odom3MKZ)
            rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes,self.BBoxBuilder)
            rospy.Subscriber('/os_cloud_node/points', PointCloud2,self.writeToFile) #Only write to file everytime a new lidar PCL is published
            rate=rospy.Rate(10) # 10 Hz
            # Poll radar objects and camera detections synchronously, then fuse.
            while not rospy.is_shutdown():
                # CycleStartTime=time.time()
                # startTime=time.time()
                # rospy.Subscriber('/as_tx/objects', ObjectWithCovarianceArray,self.RdrMsrmtsMKZ)
                # rospy.Subscriber('/darknet_ros/found_object', ObjectCount,self.CamMsrmts)
                self.RdrMsrmtsMKZ(rospy.wait_for_message('/as_tx/objects', ObjectWithCovarianceArray))
                self.CamMsrmts(rospy.wait_for_message('/darknet_ros/found_object', ObjectCount))
                # # print('TOTAL for RDR:' + str(time.time()-startTime))
                # # startTime=time.time()
                # try:
                #     rospy.Subscriber('/darknet_ros/found_object', ObjectCount,self.CamMsrmts)
                # except:
                #     rospy.loginfo('No Camera Data/Bounding Boxes found')
                #     pass
                # print('TOTAL for CAM:' + str(time.time()-startTime))
                # startTimeCom=time.time()
                # print('Time Combining:' + str(time.time()-startTimeCom))
                # print('Total Cycle Time:' + str(time.time()-CycleStartTime))
                self.CamRdrCombine()
                rate.sleep()
        elif DataSetType=="matlab":
            self.CamFOV=50
            rospy.Subscriber('/Thermal_Panorama', Image, self.buildImage)
            rospy.Subscriber('/imu/data', Imu, self.Odom2MKZ) # TODO: fix after IMU is available
            rospy.Subscriber('/vehicle/twist', TwistStamped,self.Odom3MKZ)
            rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes,self.BBoxBuilder)
            rate=rospy.Rate(10) # 10 Hz
            # Same poll/fuse loop as the MKZ branch (only CamFOV differs).
            while not rospy.is_shutdown():
                # CycleStartTime=time.time()
                # startTime=time.time()
                # rospy.Subscriber('/as_tx/objects', ObjectWithCovarianceArray,self.RdrMsrmtsMKZ)
                # rospy.Subscriber('/darknet_ros/found_object', ObjectCount,self.CamMsrmts)
                self.RdrMsrmtsMKZ(rospy.wait_for_message('/as_tx/objects', ObjectWithCovarianceArray))
                self.CamMsrmts(rospy.wait_for_message('/darknet_ros/found_object', ObjectCount))
                # # print('TOTAL for RDR:' + str(time.time()-startTime))
                # # startTime=time.time()
                # try:
                #     rospy.Subscriber('/darknet_ros/found_object', ObjectCount,self.CamMsrmts)
                # except:
                #     rospy.loginfo('No Camera Data/Bounding Boxes found')
                #     pass
                # print('TOTAL for CAM:' + str(time.time()-startTime))
                # startTimeCom=time.time()
                # print('Time Combining:' + str(time.time()-startTimeCom))
                # print('Total Cycle Time:' + str(time.time()-CycleStartTime))
                self.CamRdrCombine()
                rate.sleep()
def buildImage(self,data):
if not(hasattr(self,'image')):
self.image=[]
self.image=self.bridge.imgmsg_to_cv2(data, "rgb8")
self.imageTime=data.header
self.ImageExists=1
def Odom1NuSc(self,data):
self.Vt =data.linear.x
def Odom2NuSc(self,data):
self.psi=data.pose.pose.orientation.z
def Odom3NuSc(self,data):
self.psiD=data.angular_velocity.z
def Odom1MKZ(self,data): # REMOVE
self.Vt=data.speed
def Odom2MKZ(self,data):
self.psi=tf.transformations.euler_from_quaternion([data.orientation.x,data.orientation.y,data.orientation.z,data.orientation.w])[2]
# psi above is in radians, with 0 facing due EAST, not north
def Odom3MKZ(self,data):
self.psiD=data.twist.angular.z
self.Vt=data.twist.linear.x
self.velX=self.Vt # For use in calculating velocity of cut in vehicle(tracking target), Vc
def writeToFile(self,data):
# print('Writing ToFile')
if not hasattr(self,'CombinedTracks'):
return
# self.Readoings=[]
# n=len(self.RadarTracks)
RadarAnglesH=0.0
RadarAnglesV=0.0
frame=self.FrameInit
self.FrameInit+=1
if self.UseCamTracksOnly==1:
writeTracks=self.CurrentCamTracks
else:
writeTracks=self.CombinedTracks
for idx in range(len(writeTracks.tracks)):
# if (data.objects[idx].pose.pose.position.x==0.0) and (data.objects[idx].pose.pose.position.y==0.0) and (data.objects[idx].pose.covariance[0]==0.0):
# continue #Zero entry, so skip it
# else: #write to file
# <frame>, <id>, <bb_left>, <bb_top>, <bb_width>, <bb_height>, <conf>, <x>, <y>, <z>
id=int(idx+1) # TODO: This is temp, not true ID of car
# RadarX=data.objects[idx].pose.pose.position.x+self.delta_x
# RadarY=data.objects[idx].pose.pose.position.y
# RadarZ=0.0+self.delta_z
# RadarAnglesH=-np.degrees(np.arctan(np.divide(RadarY,RadarX)))
# RadarAnglesV=np.abs(np.degrees(np.arctan(np.divide(RadarZ,RadarX)))) #will always be negative, so correct for it
if self.ImageExists==1:
# imageTemp = self.image
# print(imageTemp.shape)
# CameraX=RadarAnglesH*(self.image.shape[1]/self.H_FOV) + self.image.shape[1]/2 +self.HorzOffsetTXT# Number of pixels per degree,adjusted for shifting origin from centerline to top left
# CameraY=RadarAnglesV*(self.image.shape[0]/self.V_FOV) +256 +self.VertOffsetTXT -RadarX*np.sin(np.radians(4)) # Number of pixels per degree,adjusted for shifting origin from centerline to top left
#Write to File
bb_left=int(writeTracks.tracks[idx].yPx.data)
bb_top=int(writeTracks.tracks[idx].zPx.data)
bb_width=int(writeTracks.tracks[idx].width.data)
bb_height=int(writeTracks.tracks[idx].height.data)
x=-1 # Fillers
y=-1
z=-1
conf=1
outLine=str(frame)+' '+str(id)+' '+str(bb_left)+' '+str(bb_top)+' '+str(bb_width)+' '+str(bb_height)+' '+str(conf)+' '+str(x)+' '+str(y)+' '+str(z)+'\n'
# print(outLine)
self.DestF.write(outLine)
def CamIOUcheck(self,checkIdx):
#Return boolean. checks if IOU of given SensorIndex over any Current tracks is greater than threshold
# if it is, then returns false
outBool2=True # By default
#TODO: perform check if required
return outBool2
    def trackInitiator(self,SensorData):
        """Promote tentative tracks to current tracks, or start new tentative ones.

        Dispatches on the sensor type of SensorData[0] (CamObj vs RadarObj /
        RadarObjMKZ). For each tentative track, the nearest unclaimed
        measurement is found; if it falls inside the initiation threshold the
        track is promoted to the Current*Tracks set, otherwise the track ages
        out and is deleted. Leftover measurements seed new tentative tracks.
        Mutates self.Initiated{Cam,Rdr}Tracks and self.Current{Cam,Rdr}Tracks.
        """
        if not any(SensorData):
            return
        elif isinstance(SensorData[0],CamObj):
            if hasattr(self, 'InitiatedCamTracks'):
                # Then, move to current tracks based on NN-style gating
                toDel=[]
                InitiatedCamTracks=self.InitiatedCamTracks
                # first build array of all sensor indices that are within validation gate of current tracks
                if hasattr(self,'CurrentCamTracks'):
                    TempCurrTracks=self.CurrentCamTracks
                    SensorIndicesInit=[]
                    for cdx in range(len(TempCurrTracks.tracks)):
                        SensorIndicesInit.append(self.ValidationGate(SensorData,TempCurrTracks.tracks[cdx]))
                else:
                    SensorIndicesInit=[]
                for idx in range(len(InitiatedCamTracks.tracks)):
                    # R collects pixel distances from this tentative track to each
                    # unclaimed detection center.
                    R=[]
                    if len(SensorData)==0:
                        continue
                    for jdx in range(len(SensorData)):
                        # If the Sensor Data is already in validatation gate of any of the currentTracks, skip adding that into InitiatedTracks
                        if self.InitSensorValidator(SensorIndicesInit,jdx):
                            continue
                        else:
                            R.append(np.sqrt((InitiatedCamTracks.tracks[idx].yPx.data-(SensorData[jdx].xmax+SensorData[jdx].xmin)/2)**2 \
                                +(InitiatedCamTracks.tracks[idx].zPx.data-(SensorData[jdx].ymax+SensorData[jdx].ymin)/2)**2))
                    if len(R)==0:
                        R=9000 #Arbitrarily large value
                    R=np.asarray(R)
                    # print()
                    # print(R)
                    if (np.min(R)<self.trackInitCamThresh): # Then move this to current track # Inherent assumption here is that only one will be suitable
                        jdx=np.argmin(R)
                        if not hasattr(self, 'CurrentCamTracks'):
                            self.CurrentCamTracks=trackArrayCam()
                        # Time since this tentative track was created; used to
                        # back out pixel velocities by finite difference.
                        delT=self.imageTime.stamp-InitiatedCamTracks.header.stamp
                        delT=delT.to_sec()
                        self.CurrentCamTracks.header=SensorData[jdx].header
                        InitiatedCamTracks.tracks[idx].Stat.data=1 # Moving to current track
                        # Update the track with new sensor data before pushing to Current tracks
                        InitiatedCamTracks.tracks[idx].VyPx.data=\
                            (InitiatedCamTracks.tracks[idx].yPx.data-(SensorData[jdx].xmax+SensorData[jdx].xmin)/2)/delT
                        InitiatedCamTracks.tracks[idx].VzPx.data=\
                            (InitiatedCamTracks.tracks[idx].zPx.data-(SensorData[jdx].ymax+SensorData[jdx].ymin)/2)/delT
                        InitiatedCamTracks.tracks[idx].widthDot.data=\
                            (InitiatedCamTracks.tracks[idx].width.data-(SensorData[jdx].xmax-SensorData[jdx].xmin))/delT
                        InitiatedCamTracks.tracks[idx].heightDot.data=\
                            (InitiatedCamTracks.tracks[idx].height.data-(SensorData[jdx].ymax-SensorData[jdx].ymin))/delT
                        InitiatedCamTracks.tracks[idx].height.data=(SensorData[jdx].ymax-SensorData[jdx].ymin)
                        InitiatedCamTracks.tracks[idx].width.data=(SensorData[jdx].xmax-SensorData[jdx].xmin)
                        InitiatedCamTracks.tracks[idx].yPx.data=(SensorData[jdx].xmax+SensorData[jdx].xmin)/2
                        InitiatedCamTracks.tracks[idx].zPx.data=(SensorData[jdx].ymax+SensorData[jdx].ymin)/2
                        InitiatedCamTracks.tracks[idx].confidence=SensorData[jdx].confidence
                        Pk=np.diag([5,5,5,5,50,50,50,50]) # Initial covariance matrix
                        InitiatedCamTracks.tracks[idx].P=Mat_buildROS(Pk)
                        self.CurrentCamTracks.tracks=np.append(self.CurrentCamTracks.tracks,InitiatedCamTracks.tracks[idx])
                        toDel.append(idx)
                        SensorData=np.delete(SensorData,np.argmin(R))
                    else: # for this idx of InitiatedCamTrack, the last jdx, so no measurements are nearby; delete the idx
                        toDel.append(idx)
                #Clean all InitiatedCamTracks using toDel
                self.InitiatedCamTracks.tracks=np.delete(InitiatedCamTracks.tracks,toDel)
                #Remove old initiated tracks (if idle for more than 3 time steps):
                toDel2=[]
                for idx in range(len(self.InitiatedCamTracks.tracks)):
                    self.InitiatedCamTracks.tracks[idx].Stat.data=self.InitiatedCamTracks.tracks[idx].Stat.data-1
                    if self.InitiatedCamTracks.tracks[idx].Stat.data<0:
                        toDel2.append(idx)
                self.InitiatedCamTracks.tracks=np.delete(self.InitiatedCamTracks.tracks,toDel2)
                # Then concatenate remaining sensor Data for future initation
                if len(SensorData)==0:
                    return
                self.InitiatedCamTracks.header=SensorData[0].header
                for idx in range(len(SensorData)):
                    self.InitiatedCamTracks.tracks=np.append(self.InitiatedCamTracks.tracks,trackCam())
                    self.InitiatedCamTracks.tracks[-1].Stat.data= -1 # InitiatedTrack
                    self.InitiatedCamTracks.tracks[-1].yPx.data=(SensorData[idx].xmax+SensorData[idx].xmin)/2
                    self.InitiatedCamTracks.tracks[-1].zPx.data=(SensorData[idx].ymax+SensorData[idx].ymin)/2
                    self.InitiatedCamTracks.tracks[-1].VyPx.data=0
                    self.InitiatedCamTracks.tracks[-1].VzPx.data=0
                    self.InitiatedCamTracks.tracks[-1].width.data=(SensorData[idx].xmax-SensorData[idx].xmin)
                    self.InitiatedCamTracks.tracks[-1].widthDot.data=0
                    self.InitiatedCamTracks.tracks[-1].height.data=(SensorData[idx].ymax-SensorData[idx].ymin)
                    self.InitiatedCamTracks.tracks[-1].heightDot.data=0
                    self.InitiatedCamTracks.tracks[-1].confidence=SensorData[idx].confidence
            else: # Start of algorithm, no tracks
                self.InitiatedCamTracks=trackArrayCam()
                self.InitiatedCamTracks.header=SensorData[0].header
                for idx in range(len(SensorData)):
                    self.InitiatedCamTracks.tracks=np.append(self.InitiatedCamTracks.tracks,trackCam())
                    self.InitiatedCamTracks.tracks[-1].Stat.data=-1 # Initiated Track
                    self.InitiatedCamTracks.tracks[-1].yPx.data=(SensorData[idx].xmax+SensorData[idx].xmin)/2
                    self.InitiatedCamTracks.tracks[-1].zPx.data=(SensorData[idx].ymax+SensorData[idx].ymin)/2
                    self.InitiatedCamTracks.tracks[-1].VyPx.data=0
                    self.InitiatedCamTracks.tracks[-1].VzPx.data=0
                    self.InitiatedCamTracks.tracks[-1].width.data=(SensorData[idx].xmax-SensorData[idx].xmin)
                    self.InitiatedCamTracks.tracks[-1].widthDot.data=0
                    self.InitiatedCamTracks.tracks[-1].height.data=(SensorData[idx].ymax-SensorData[idx].ymin)
                    self.InitiatedCamTracks.tracks[-1].heightDot.data=0
                    self.InitiatedCamTracks.tracks[-1].confidence=SensorData[idx].confidence
        elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
            if hasattr(self, 'InitiatedRdrTracks'):# Some (or Zer0) tracks already exists (i.e, not start of algorithm)
                toDel=[]
                InitiatedRdrTracks=self.InitiatedRdrTracks
                # first build array of all sensor indices that are within validation gate of current tracks
                if hasattr(self,'CurrentRdrTracks'):
                    TempCurrTracksRdr=self.CurrentRdrTracks
                    SensorIndicesInitRdr=[]
                    for cdx in range(len(TempCurrTracksRdr.tracks)):
                        SensorIndicesInitRdr.append(self.ValidationGate(SensorData,TempCurrTracksRdr.tracks[cdx]))
                else:
                    SensorIndicesInitRdr=[]
                for idx in range(len(self.InitiatedRdrTracks.tracks)):
                    gateValX=[]
                    gateValY=[]
                    gateValRMS=[]
                    # Find all sensor objects within some gate
                    if len(SensorData)==0:
                        continue
                    for jdx in range(len(SensorData)):
                        if self.InitSensorValidator(SensorIndicesInitRdr,jdx):
                            continue
                        else:
                            gateValX.append(np.abs(SensorData[jdx].pose.position.x-self.InitiatedRdrTracks.tracks[idx].x.data))
                            gateValY.append(np.abs(SensorData[jdx].pose.position.y-self.InitiatedRdrTracks.tracks[idx].y.data))
                            gateValRMS.append(np.sqrt((gateValX[-1])**2+(gateValY[-1])**2))
                    if len(gateValRMS)==0:
                        gateValRMS=1000# Arbitrary large value, greater than trackInitRdrThresh
                    if (np.min(np.array(gateValRMS))<=self.trackInitRdrThresh): # @50Hz, 20m/s in X dir and 10m/s in Y-Direction as validation gate
                        #If gate is satisfied, move to CurrentRdrTracks after initiating P and deleting that SensorData[idx]
                        self.InitiatedRdrTracks.tracks[idx].P=Mat_buildROS(np.array([[3,0,0,0],[0,3,0,0],[0,0,3,0],[0,0,0,1]]))
                        #(Large uncertainity given to Beta. Others conservatively picked based on Delphi ESR spec sheet)
                        self.InitiatedRdrTracks.tracks[idx].Stat.data=1# Moving to CurrentRdrTracks
                        # Build the measurement Jacobian H for this state
                        # (x, y, Vc, Beta) before promotion.
                        x=self.InitiatedRdrTracks.tracks[idx].x.data
                        y=self.InitiatedRdrTracks.tracks[idx].y.data
                        Vc=self.InitiatedRdrTracks.tracks[idx].Vc.data
                        Beta=self.InitiatedRdrTracks.tracks[idx].B.data
                        psi=np.array([self.psi])
                        psiD=np.array([self.psiD])
                        Vt=self.Vt
                        posNorm=np.sqrt(x**2+y**2)
                        H31=(Vc*np.sin((psi-Beta).astype(float))*y**2-x*y*(Vc*np.cos((psi-Beta).astype(float))-Vt))/(posNorm**3)
                        H32=(-Vc*np.sin((psi-Beta).astype(float))*x*y+x**2*(Vc*np.cos((psi-Beta).astype(float))-Vt))/(posNorm**3)
                        H33=x*np.sin((psi-Beta).astype(float))/posNorm+y*np.cos((psi-Beta).astype(float))/posNorm
                        H34=(-x*Vc*np.cos((psi-Beta).astype(float))+y*Vc*np.sin((psi-Beta).astype(float)))/posNorm
                        Hk=np.array([[1,0,0,0],[x/posNorm,y/posNorm,0,0],[H31,H32,H33,H34]])
                        self.InitiatedRdrTracks.tracks[idx].H=Mat_buildROS(Hk)
                        if hasattr(self, 'CurrentRdrTracks'):
                            pass
                        else:
                            self.CurrentRdrTracks=trackArrayRdr()
                        self.CurrentRdrTracks.header=self.InitiatedRdrTracks.header
                        self.CurrentRdrTracks.tracks=np.append(self.CurrentRdrTracks.tracks,self.InitiatedRdrTracks.tracks[idx])
                        #Build Arrays for deletion:
                        toDel.append(idx)
                        #Also Delete the corresponding SensorData value:
                        SensorData=np.delete(SensorData,np.argmin(gateValRMS))
                    else: # none of the SensorData is close to InitiatedRdrTracks[idx], so delete it
                        toDel.append(idx)
                # Clean all InitiatedRdrTracks with status 1
                self.InitiatedRdrTracks.tracks=np.delete(self.InitiatedRdrTracks.tracks,toDel)
                #Remove old initiated tracks:(if idle for more than 2 time steps):
                toDel2=[]
                for idx in range(len(self.InitiatedRdrTracks.tracks)):
                    self.InitiatedRdrTracks.tracks[idx].Stat.data=self.InitiatedRdrTracks.tracks[idx].Stat.data-1
                    if self.InitiatedRdrTracks.tracks[idx].Stat.data<=-3:
                        toDel2.append(idx)
                self.InitiatedRdrTracks.tracks=np.delete(self.InitiatedRdrTracks.tracks,toDel2)
                # Then concatenate remaining sensor Data for future initation
                if len(SensorData)==0:
                    return
                self.InitiatedRdrTracks.header=SensorData[0].header
                for idx in range(len(SensorData)):
                    self.InitiatedRdrTracks.tracks=np.append(self.InitiatedRdrTracks.tracks,trackRdr())
                    self.InitiatedRdrTracks.tracks[-1].Stat.data= -1 # InitiatedTrack
                    self.InitiatedRdrTracks.tracks[-1].x.data=SensorData[idx].pose.position.x
                    self.InitiatedRdrTracks.tracks[-1].y.data=SensorData[idx].pose.position.y
                    self.InitiatedRdrTracks.tracks[-1].Vc.data=np.sqrt(SensorData[idx].vx_comp**2+SensorData[idx].vy_comp**2)
                    self.InitiatedRdrTracks.tracks[-1].B.data=self.psi -(np.arctan(SensorData[idx].pose.position.y/(0.0001 if (SensorData[idx].pose.position.x)==0.0 else (SensorData[idx].pose.position.x))))
                    # TODO: Improve Beta estimate by taking into account relative Vx(invert heading if object istraveling towards car)
            else: # Start of algorithm, no tracks
                self.InitiatedRdrTracks=trackArrayRdr()
                self.InitiatedRdrTracks.header=SensorData[0].header
                for idx in range(len(SensorData)):
                    self.InitiatedRdrTracks.tracks=np.append(self.InitiatedRdrTracks.tracks,trackRdr())
                    self.InitiatedRdrTracks.tracks[-1].Stat.data= -1 # InitiatedTrack
                    self.InitiatedRdrTracks.tracks[-1].x.data=SensorData[idx].pose.position.x
                    self.InitiatedRdrTracks.tracks[-1].y.data=SensorData[idx].pose.position.y
                    self.InitiatedRdrTracks.tracks[-1].Vc.data=np.sqrt(SensorData[idx].vx_comp**2+SensorData[idx].vy_comp**2)
                    self.InitiatedRdrTracks.tracks[-1].B.data=self.psi -(np.arctan(SensorData[idx].pose.position.y/(0.0001 if (SensorData[idx].pose.position.x)==0.0 else (SensorData[idx].pose.position.x))))
                    # TODO: Improve Beta estimate by taking into account relative Vx(invert heading if object istraveling towards car)
def trackDestructor(self,SensorData):
if not any(SensorData):
return
if isinstance(SensorData[0],CamObj):
if not (hasattr(self,'CurrentCamTracks')):
return
toDel=[]
for idx in range(len(self.CurrentCamTracks.tracks)):
if self.CurrentCamTracks.tracks[idx].Stat.data>=2:# Testing, made less persistent
toDel.append(idx)
self.CurrentCamTracks.tracks=np.delete(self.CurrentCamTracks.tracks,toDel)
elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
if not(hasattr(self,'CurrentRdrTracks')):
return
toDel=[]
for idx in range(len(self.CurrentRdrTracks.tracks)):
if self.CurrentRdrTracks.tracks[idx].Stat.data>=4: # If no measurements associated for 4 steps
toDel.append(idx)
self.CurrentRdrTracks.tracks=np.delete(self.CurrentRdrTracks.tracks,toDel)
    def trackMaintenance(self,SensorData):
        """Run one gate/associate/update cycle on the current tracks.

        For each current track a validation gate selects candidate
        measurements, then KalmanEstimate (which performs the data
        association) and KalmanPropagate advance the filter. Camera tracks
        are additionally published on the dataAssocCam topic.
        """
        if not any(SensorData):
            return
        if isinstance(SensorData[0],CamObj):
            if not hasattr(self, 'CurrentCamTracks'):
                return
            SensorIndices=[]
            for idx in range(len(self.CurrentCamTracks.tracks)):
                SensorIndices.append(self.ValidationGate(SensorData,self.CurrentCamTracks.tracks[idx]))#Clean the incoming data - outputs 2D python array
            # Above yields array of possible measurments (only indices) corresponding to a particular track
            # startTime1=time.time()
            self.KalmanEstimate(SensorData,SensorIndices, self.Method) # Includes DataAssociation Calcs
            # print('Time for KalmanEstimate:' + str(time.time()-startTime1))
            # startTime2=time.time()
            self.KalmanPropagate(SensorData)
            # print('Time for KalmanPropagate:' + str(time.time()-startTime2))
            self.TrackPubCam.publish(self.CurrentCamTracks)
        elif isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
            if not hasattr(self, 'CurrentRdrTracks'):
                return
            SensorIndices=[]
            for idx in range(len(self.CurrentRdrTracks.tracks)):
                SensorIndices.append(self.ValidationGate(SensorData,self.CurrentRdrTracks.tracks[idx]))#Clean the incoming data - outputs 2D python array
            # Above yields array of possible measurments (only indices) corresponding to a particular track
            # startTimeKE=time.time()
            self.KalmanEstimate(SensorData,SensorIndices, self.Method) # Includes DataAssociation Calcs
            # print('Time for KalmanEstimate:' + str(time.time()-startTimeKE))
            # startTimeKP=time.time()
            self.KalmanPropagate(SensorData)
            # print('Time for KalmanPropagate:' + str(time.time()-startTimeKP))
            # NOTE(review): radar tracks are intentionally not published here;
            # the publish call below was disabled in the original source.
            # self.TrackPubRdr.publish(header=self.CurrentRdrTracks.header, tracks =self.CurrentRdrTracks.tracks)
            # rospy.loginfo_once('Current tracks published to topic /dataAssoc')
def InitSensorValidator(self,SensorIndicesInit,jdx):
#takes SensorIndices 2 D python array and current Sensor index being checked;
# returns true if the current index is in the 2D array
outBool=False # By default
if len(SensorIndicesInit)==0:
return outBool
for sens_idx in range(len(SensorIndicesInit)):
if jdx in SensorIndicesInit[sens_idx]:
outBool=True
return outBool
    def trackPlotter(self):
        """Render current radar/camera/combined tracks onto the latest frame
        and publish it on the fusedImage topic.

        PlotArg selects what is drawn: '2' camera boxes, '3' radar circles,
        '4' both, '1' combined-only; '0' (or no frame yet) skips rendering.
        Color code: green = confirmed track, blue = initialization candidate,
        orange = destruction candidate, yellow = combined track.
        """
        if not (hasattr(self,'image')) or (self.PlotArg=='0'):
            return # Skip function call if image is not available or plotting is disabled
        LocalImage=self.image
        if (self.PlotArg=='3') or (self.PlotArg=='4'): # Then, plot Radar stuff
            if not hasattr(self,'CurrentRdrTracks'):
                return # Skip
            CurrentRdrTracks=self.CurrentRdrTracks
            n=len(CurrentRdrTracks.tracks)
            RadarAnglesH=np.zeros((n,1))
            RadarAnglesV=np.zeros((n,1))
            # Camera Coordinates: X is horizontal, Y is vertical starting from left top corner
            CirClr=[]
            for idx1 in range(len(CurrentRdrTracks.tracks)):
                # Project each radar (x, y) position into horizontal/vertical
                # view angles relative to the camera.
                temp1=np.divide(CurrentRdrTracks.tracks[idx1].y.data,CurrentRdrTracks.tracks[idx1].x.data)
                RadarAnglesH[idx1]=-np.degrees(np.arctan(temp1.astype(float)))
                temp2=np.divide(self.CamZoffset,CurrentRdrTracks.tracks[idx1].x.data+self.CamXOffset)
                RadarAnglesV[idx1]=np.abs(np.degrees(np.arctan(temp2.astype(float)))) #will always be negative, so correct for it
                if (CurrentRdrTracks.tracks[idx1].Stat.data>=1) and (CurrentRdrTracks.tracks[idx1].Stat.data<14): #Current Track- green
                    CirClr.append(np.array([0,255,0]))
                elif CurrentRdrTracks.tracks[idx1].Stat.data<=0: # Candidate Tracks for initialization - blue
                    CirClr.append(np.array([255,0,0]))
                else: # Candidate for Destructor-orange
                    CirClr.append(np.array([0,165,255]))
            # Angle-to-pixel conversion; shifts origin from image centerline to top-left.
            # NOTE(review): the vertical scale constant 39.375 and the 480/2 offset
            # appear tuned for a specific image size -- confirm against the camera setup.
            CameraX=np.dot(RadarAnglesH,(self.image.shape[1]/self.CamFOV)) + self.image.shape[1]/2 # Number of pixels per degree,adjusted for shifting origin from centerline to top left
            CameraY=np.dot(RadarAnglesV,(self.image.shape[0]/(39.375))) +480/2 # Number of pixels per degree,adjusted for shifting origin from centerline to top left
            CirClr=np.array(CirClr)
            CameraX=np.array(CameraX)
            for idx3 in range(len(CameraX)):
                if (CameraX[idx3]<=self.image.shape[1]):
                    LocalImage=cv2.circle(LocalImage, (int(CameraX[idx3]),int(CameraY[idx3])), 12, CirClr[idx3].tolist(),3)
                    LocalImage=cv2.putText(LocalImage,str(idx3),(int(CameraX[idx3]),int(CameraY[idx3])),self.font,1,(255,105,180),2)
        #Now Plot Camera Trakcs:
        if (self.PlotArg=='2') or (self.PlotArg=='4'): # Then, plot Cam stuff
            if not hasattr(self,'CurrentCamTracks'):
                return # Skip
            CurrentCamTracks=self.CurrentCamTracks
            RectClr=[]
            for jdx in range(len(CurrentCamTracks.tracks)):
                if (CurrentCamTracks.tracks[jdx].Stat.data>=1) and (CurrentCamTracks.tracks[jdx].Stat.data<14): #Current Track- green
                    RectClr.append(np.array([0,255,0]))
                elif CurrentCamTracks.tracks[jdx].Stat.data<=0: # Candidate Tracks for initialization - blue
                    RectClr.append(np.array([255,0,0]))
                else: # Candidate for Destructor-orange
                    RectClr.append(np.array([0,165,255]))
            for idx2 in range(len(CurrentCamTracks.tracks)):
                # Convert center/size track state to top-left and bottom-right corners.
                start=(int(CurrentCamTracks.tracks[idx2].yPx.data-CurrentCamTracks.tracks[idx2].width.data/2),int(CurrentCamTracks.tracks[idx2].zPx.data-CurrentCamTracks.tracks[idx2].height.data/2))
                end= (int(CurrentCamTracks.tracks[idx2].yPx.data+CurrentCamTracks.tracks[idx2].width.data/2),int(CurrentCamTracks.tracks[idx2].zPx.data+CurrentCamTracks.tracks[idx2].height.data/2))
                LocalImage=cv2.rectangle(LocalImage,start,end,RectClr[idx2].tolist(),2)
        if (self.PlotArg=='1') or (self.PlotArg=='4'): # Only plot self.CombinedTracks
            if not hasattr(self,'CombinedTracks'):
                return
            currCombinedTracks=self.CombinedTracks
            RectClr=[]
            for jdx in range(len(currCombinedTracks.tracks)):
                RectClr.append(np.array([102,255,255])) # Yellow
            for idx2 in range(len(currCombinedTracks.tracks)):
                start=(int(currCombinedTracks.tracks[idx2].yPx.data-currCombinedTracks.tracks[idx2].width.data/2),int(currCombinedTracks.tracks[idx2].zPx.data-currCombinedTracks.tracks[idx2].height.data/2))
                end= (int(currCombinedTracks.tracks[idx2].yPx.data+currCombinedTracks.tracks[idx2].width.data/2),int(currCombinedTracks.tracks[idx2].zPx.data+currCombinedTracks.tracks[idx2].height.data/2))
                LocalImage=cv2.rectangle(LocalImage,start,end,RectClr[idx2].tolist(),2)
        self.image_pub.publish(self.bridge.cv2_to_imgmsg(LocalImage, "bgr8"))
        rospy.loginfo_once('Image is being published')
    def CamRdrCombine(self):
        """Fuse camera and radar tracks into self.CombinedTracks.

        A camera track is accepted when a radar track projects (in image Y
        pixels) inside a radius derived from the camera box size plus
        CombGateThresh, OR when the camera confidence alone is >= 0.36 --
        note the second condition bypasses the radar check entirely.
        Accepted tracks get Stat=99 as a "combined/validated" marker.
        """
        if not hasattr(self,'CurrentCamTracks') or (not hasattr(self,'CurrentRdrTracks')):
            return
        self.CombinedTracks=trackArrayCam()
        n=len(self.CurrentCamTracks.tracks)
        LocalRdrYArr=[]
        for rdx in range(len(self.CurrentRdrTracks.tracks)):
            # Project radar (x, y) into a horizontal image pixel coordinate.
            temp1=np.divide(self.CurrentRdrTracks.tracks[rdx].y.data,self.CurrentRdrTracks.tracks[rdx].x.data)
            temp2=-np.degrees(np.arctan(temp1.astype(float)))
            LocalRdrYArr.append(np.dot(temp2,(self.image.shape[1]/self.CamFOV)) + self.image.shape[1]/2+self.HorzOffset) # Gives all Y-coord (pixels) of all radar tracks
        for jdx in range(n):
            # Acceptance radius scales with the camera box size.
            radius=(self.CurrentCamTracks.tracks[jdx].width.data+self.CurrentCamTracks.tracks[jdx].height.data)/2+self.CombGateThresh
            centerY=self.CurrentCamTracks.tracks[jdx].yPx.data
            for Rdx in range(len(LocalRdrYArr)):
                if (abs(LocalRdrYArr[Rdx]-centerY)<=radius) or (self.CurrentCamTracks.tracks[jdx].confidence>=0.36):
                    self.CurrentCamTracks.tracks[jdx].Stat.data=99 #To indicate that the status is combined/validated
                    #TODO: Create a custom CombinedTracks Message that has both radar and Camera info?
                    self.CombinedTracks.tracks.append(self.CurrentCamTracks.tracks[jdx])
                    break
                else:
                    continue
def trackManager(self,SensorData):
# startTime01=time.time()
self.trackMaintenance(SensorData)
# print('Time for Track Maint:' + str(time.time()-startTime01))
# startTime02=time.time()
self.trackInitiator(SensorData)
# print('Time for Track Init:' + str(time.time()-startTime02))
# startTime03=time.time()
self.trackDestructor(SensorData)
# print('Time for Track Destr:' + str(time.time()-startTime03))
# startTime04=time.time()
self.trackPlotter()
# print('Time for Track Plotter:' + str(time.time()-startTime04))
# startTime05=time.time()
if hasattr(self,'CurrentCamTracks') or hasattr(self,'CurrentRdrTracks'):
s= '# Cam Tracks: ' + (str(len(self.CurrentCamTracks.tracks)) if hasattr(self,'CurrentCamTracks') else 'None') + \
'; Rdr Tracks: ' + (str(len(self.CurrentRdrTracks.tracks)) if hasattr(self,'CurrentRdrTracks') else 'None') +'; # Combined Tracks:'\
+(str(len(self.CombinedTracks.tracks)) if hasattr(self,'CombinedTracks') else 'None')
print(s)
# print('Time printing in track manager:' + str(time.time()-startTime05))
def DataAssociation(self,SensorData,SensorIndices,Method):
if Method=="Hungarian":
pass
elif Method=="JPDA":
#Build A Validation Matrix if there are sufficient sensor data and tracks
if (len(SensorData)<1) or (len(self.CurrentRdrTracks.tracks)<1):
Yk=[]
else:
Yk=[]
#create empty Yk list, with given number of targets (currentTracks):
for l_dx in range(len(self.CurrentRdrTracks.tracks)):
Yk.append([])
C=3 # Number of false measurements per unit volume (assume), clutter density
Pd=0.9 #Probability of detection
# Create Clusters by cycling through SensorIndices, maintain
OpenList=[]
ClusterList=[]
for tempdx in range(len(self.CurrentRdrTracks.tracks)):
OpenList.append(tempdx)
OpenList=np.array(OpenList)
while any(OpenList):
tempClusterList=[]
tempClusterList.append(OpenList[0])
SensorRdgList=np.array(SensorIndices[OpenList[0]]).flatten()
OpenList=np.delete(OpenList,0) # Remove this element from searchable list of tracks, will be added later to ClusterList
# Chase down all other tracks that share common sensor measurements
n_meas=len(SensorData) # Total number of possible measurements
for m_dx in range(n_meas):
if m_dx in SensorRdgList:
ToDelOpenList=[]
for cluster_dx in OpenList:
indices = [i for i, obj in enumerate(SensorIndices[cluster_dx]) if obj == m_dx]
if any(indices) and (not (cluster_dx in tempClusterList)) :
tempClusterList.append(cluster_dx)
ToDelOpenList.append(cluster_dx) # To be Deleted from OpenList
np.append(SensorRdgList,SensorIndices[cluster_dx]).flatten()
OpenList=np.setdiff1d(OpenList,ToDelOpenList) # Remove from OpenList
else:
continue
# Now add this cluster to ClusterList
ClusterList.append(tempClusterList)
### Directly calculate Bjt if cluster size is 1:4 as per Bose Paper
# First calculate Yjt and Sjt:
for tdx in range(len(self.CurrentRdrTracks.tracks)):
# Calculate Y_jt and S_jt
# First Sjt, since it only depends on t, not j
Sjt=np.zeros((len(self.CurrentRdrTracks.tracks),3,3))
Hk=Mat_extractROS(self.CurrentRdrTracks.tracks[tdx].H)
Pk=Mat_extractROS(self.CurrentRdrTracks.tracks[tdx].P)
Sjt[tdx]=np.matmul(np.matmul(Hk,Pk),Hk.T)+self.R_rdr
def PjtCalc(meas_dx,target_dx,YjtLocal,Sjt):
if meas_dx in SensorIndices[target_dx]:
Pjt=Pd*np.exp(-np.matmul(np.matmul(YjtLocal[:,meas_dx].T,Sjt[target_dx]),YjtLocal[:,meas_dx])/2)/(np.sqrt((2*np.pi)*np.linalg.det(Sjt[target_dx])))
else:
Pjt=0
return Pjt
def GjCal(meas_dx,target_dx1, target_dx2,YjtLocal,Sjt):
Gj=PjtCalc(meas_dx,target_dx1,YjtLocal,Sjt)*PjtCalc(meas_dx,target_dx2,YjtLocal,Sjt)
return Gj
def YjtCalc(t_idx):
    # Build the innovation matrix Y_jt (3 x n_gated) for track t_idx:
    # each column is (measurement - predicted track state) expressed in
    # [x, y, speed] coordinates.
    # NOTE(review): assumes track fields .x/.y/.Vc hold position and
    # closing speed, and that SensorData entries expose pose.position and
    # vx_comp/vy_comp (ROS-style radar messages) -- confirm against the
    # message definitions.
    yt=np.array([self.CurrentRdrTracks.tracks[t_idx].x.data,self.CurrentRdrTracks.tracks[t_idx].y.data, \
        self.CurrentRdrTracks.tracks[t_idx].Vc.data]).reshape(3,1)
    Yjt=np.zeros((3,len(SensorIndices[t_idx])))
    for jdx in range(len(SensorIndices[t_idx])):
        # Measurement vector: sensor x, y and speed magnitude.
        yjt=np.array([SensorData[SensorIndices[t_idx][jdx]].pose.position.x,SensorData[SensorIndices[t_idx][jdx]].pose.position.y, \
            np.sqrt(SensorData[SensorIndices[t_idx][jdx]].vx_comp**2+SensorData[SensorIndices[t_idx][jdx]].vy_comp**2)]).reshape(3,1)
        Yjt[:,jdx]=(yjt-yt).reshape(3)
    return Yjt
for clusterItem in ClusterList:
if len(clusterItem)==1:
B0t=C*(1-Pd)
Yjt=YjtCalc(clusterItem[0])
c=B0t
if len(SensorIndices[clusterItem[0]])>0:
Z_temp=np.zeros_like(Yjt[:,0])
for j_idx in range(len(SensorIndices[clusterItem[0]])):
Bjt=PjtCalc(j_idx,clusterItem[0],Yjt,Sjt)
c=c+Bjt
Z_temp=Z_temp+Bjt*Yjt[:,j_idx]
Yk[clusterItem[0]]=Z_temp/c
else: # No measurement associated with this particular object in clusterItem
pass # Already Yk[clusterItem[0]] =[] by default
elif len(clusterItem)==2:
P0=C*(1-Pd)
P1=P0
P2=P0
#Build P1:
Yjt1=YjtCalc(clusterItem[0])
for jdx in range(len(SensorIndices[clusterItem[0]])):
P1=P1+PjtCalc(jdx,clusterItem[0],Yjt1,Sjt)
# Build P2:
Yjt2=YjtCalc(clusterItem[1])
for jdx in range(len(SensorIndices[clusterItem[1]])):
P2=P2+PjtCalc(jdx,clusterItem[1],Yjt2,Sjt)
# Now build Bjts:
B0t1=P0*P2
c1=B0t1
# Calculate Bjt1:
Z_temp=np.zeros_like(Yjt1[:,0])
for j_idx in range(len(SensorIndices[clusterItem[0]])):
Bjt1=PjtCalc(j_idx,clusterItem[0],Yjt1,Sjt)*(P2-PjtCalc(j_idx,clusterItem[1],Yjt1,Sjt))
c1=c1+Bjt1
Z_temp=Z_temp+Bjt1*Yjt1[:,j_idx]
# Add to Yk:
Yk[clusterItem[0]]=Z_temp/c1
# Now Calculate Bjt2:
B0t2=P0*P1
c2=B0t2
Z_temp=np.zeros_like(Yjt2[:,0])
for j_idx in range(len(SensorIndices[clusterItem[1]])):
Bjt2=PjtCalc(j_idx,clusterItem[1],Yjt2,Sjt)*(P1-PjtCalc(j_idx,clusterItem[0],Yjt2,Sjt))
c2=c2+Bjt2
Z_temp=Z_temp+Bjt2*Yjt2[:,j_idx]
# Add to Yk:
Yk[clusterItem[1]]=Z_temp/c1
elif len(clusterItem)==2:
# Build P's:
P0=C*(1-Pd)
P1=P0
P2=P0
P3=P0
#Build P1:
Yjt1=YjtCalc(clusterItem[0])
for jdx in range(len(SensorIndices[clusterItem[0]])):
P1=P1+PjtCalc(jdx,clusterItem[0],Yjt1,Sjt)
# Build P2:
Yjt2=YjtCalc(clusterItem[1])
for jdx in range(len(SensorIndices[clusterItem[1]])):
P2=P2+PjtCalc(jdx,clusterItem[1],Yjt2,Sjt)
# Build P3:
Yjt3=YjtCalc(clusterItem[2])
for jdx in range(len(SensorIndices[clusterItem[2]])):
P3=P3+PjtCalc(jdx,clusterItem[2],Yjt3,Sjt)
# Now Build G's:
G23=0
for jdx in range(len(SensorIndices[clusterItem[0]])):
G23=G23+GjCal(jdx,1,2,Yjt1)
G13=0
for jdx in range(len(SensorIndices[clusterItem[1]])):
G13=G13+GjCal(jdx,0,2,Yjt2)
G12=0
for jdx in range(len(SensorIndices[clusterItem[2]])):
G12=G12+GjCal(jdx,0,1,Yjt3)
# Now Build Bjt's:
B0t1=P0*(P2*P3-G23)
c1=B0t1
B0t2=P0*(P1*P3-G13)
c2=B0t2
B0t3=P0*(P1*P2-G12)
c3=B0t3
Z_temp=np.zeros_like(Yjt1[:,0])
for j_idx in range(len(SensorIndices[clusterItem[0]])):
Bjt1=PjtCalc(j_idx,0,Yjt1,Sjt)*((P2-PjtCalc(j_idx,1,Yjt2,Sjt))*(P3-PjtCalc(meas_dx,2,Yjt3,Sjt))\
-(G23-GjCal(j_idx,1,2,Yjt1,Sjt)))
c1=c1+Bjt1
Z_temp=Z_temp+Bjt1*Yjt1[:,j_idx]
Yk[clusterItem[0]]=Z_temp/c1
Z_temp=np.zeros_like(Yjt2[:,0])
for j_idx in range(len(SensorIndices[clusterItem[0]])):
Bjt2=PjtCalc(j_idx,1,Yjt2,Sjt)*((P1-PjtCalc(j_idx,0,Yjt1,Sjt))*(P3-PjtCalc(meas_dx,2,Yjt3,Sjt))\
-(G13-GjCal(j_idx,0,2,Yjt2,Sjt)))
c2=c2+Bjt2
Z_temp=Z_temp+Bjt2*Yjt2[:,j_idx]
Yk[clusterItem[1]]=Z_temp/c2
Z_temp=np.zeros_like(Yjt3[:,0])
for j_idx in range(len(SensorIndices[clusterItem[0]])):
Bjt3=PjtCalc(j_idx,2,Yjt3,Sjt)*((P1-PjtCalc(j_idx,0,Yjt1,Sjt))*(P2-PjtCalc(meas_dx,1,Yjt2,Sjt))\
-(G12-GjCal(j_idx,0,1,Yjt3,Sjt)))
c3=c3+Bjt3
Z_temp=Z_temp+Bjt3*Yjt3[:,j_idx]
Yk[clusterItem[2]]=Z_temp/c3
# If cluster size is greater than 4, use approximation as per paper (TODO, if required)
else:
print('Large Cluster Density, Skipping Data Association!!')
pass
return Yk
elif Method=="Greedy": # Simple method that just outputs the closest UNUSED measurement
if isinstance(SensorData[0],RadarObj) or isinstance(SensorData[0],RadarObjMKZ):
# Sensor indices is a 2D python list, not numpy array
usedSensorIndices=[]
Yk=[] # A python list of sensor measurements corresponding to each CurrentTrack
for idx in range(len(self.CurrentRdrTracks.tracks)):
gateValX=[]
gateValY=[]
gateValRMS=[]
if len(SensorIndices[idx])==0:
Yk.append([])
continue
else:
# print(len(SensorIndices[idx]))
for jdx in range(len(SensorIndices[idx])):
gateValX.append(np.abs(SensorData[SensorIndices[idx][jdx]].pose.position.x-self.CurrentRdrTracks.tracks[idx].x.data))
gateValY.append(np.abs(SensorData[SensorIndices[idx][jdx]].pose.position.y-self.CurrentRdrTracks.tracks[idx].y.data))
gateValRMS.append(np.sqrt(((gateValX[jdx])**2+(gateValY[jdx])**2).astype(float)))
if np.min(gateValRMS)<=self.GateThreshRdr:
sensIdx=int(np.argmin( | np.array(gateValRMS) | numpy.array |
# Python 3.7.2 version of the ODELAY Image Pipeline
import cv2
from fast_histogram import histogram1d
import h5py
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import sqlalchemy as db
import pathlib
import re
import scipy.io as sio
from scipy.sparse import csr_matrix
from scipy.optimize import minimize
import time
# internal libraries
import tools.fileio as fio
def readImage(fileName):
    """Load an ODELAY image stored as a MATLAB .mat file via tools.fileio."""
    return fio.loadmatlab(fileName)
def readExcelSheetDisc(fileName):
    """Read 'Sheet1' of an Excel workbook and print its column headings.

    Parameters
    ----------
    fileName : str, pathlib.Path or None
        Workbook path; when None a file-open dialog is shown.

    Returns
    -------
    pandas.DataFrame
        The sheet contents (the column headings are also printed).
    """
    if fileName is None:
        fileName = fio.openFileDialog()
    # BUG FIX: the original passed the literal string 'fileName' instead of
    # the variable, and used the removed keyword 'sheetname' (pandas
    # expects 'sheet_name').
    df = pd.read_excel(fileName, sheet_name='Sheet1')
    print("Column headings:")
    print(df.columns)
    return df
def readExpDisc(fileName):
    """Load experiment data from disk, prompting for a file when none given.

    Parameters
    ----------
    fileName : str, pathlib.Path or None
        Experiment data file; when None a file-open dialog is shown.

    Returns
    -------
    dict
        The data loaded by fio.loadData.  (BUG FIX: the original returned
        the undefined name ``CurrExp``, which raised NameError.)
    """
    if fileName is None:
        fileName = fio.openFileDialog()
    expData = fio.loadData(fileName)
    return expData
def roiLoadState(monitorDataFile, roiFile):
    """Placeholder for restoring ROI processing state; not yet implemented."""
    return None
def initializeExperiment(imagePath, dataPath):
    '''
    Write ODELAY Index File to initialize experiment and provide a list of roi to process as well as experiment variables.

    Critical variables:
    starting time--must be before all file time points
    magnification
    pixel size
    sensor size

    Future versions of the microscope control software will write this data into the images.

    1. Make ROI Dict that includes Paths to files and number of images in each file.
    2. Make Dict of microscope parameters magnification and pixel size and sensor data
    3. Write those variables to a hdf5 file for retrival by workers processing each ROI individually.
    '''
    # Parse argument and check to see if it is a path file.
    if isinstance(imagePath, str):
        imagePath = pathlib.Path(imagePath)
    if isinstance(dataPath, str):
        dataPath = pathlib.Path(dataPath)

    # Stage data records the well/ROI layout written by the microscope.
    stageFile = imagePath / 'ODELAY_StageData.mat'
    expName = imagePath.parts[-1]  # experiment name = image folder name
    stageData = fio.loadData(stageFile)
    # wellIdx is 1-based (MATLAB convention); shift to 0-based indexing.
    roiIndex = stageData['mP']['wellIdx']-1
    roiList = list(stageData['mP']['wellID'][roiIndex])
    roiList.sort()

    # Read in which folders are there and check
    roiFiles = getRoiFileList(imagePath, roiList)
    # Average the first few ROIs' tile stacks to estimate sensor background.
    backgroundImage = generateBackground(imagePath, roiList[:5])

    # TODO: These need to be defined by the mocroscope
    # Magnificaton, pixel size, camera dimensions, image state,
    # and image orientation, stage direction possibly pass in image files.
    # NOTE(review): sensor width is used as a proxy for which camera /
    # objective was in use -- confirm these pairings against the hardware.
    if backgroundImage.shape[0] == 2048:
        magnification = 10
        pixSize = 6.5
    else:
        magnification = 20
        pixSize = 6.45

    odelayDataPath = dataPath / 'ODELAY Roi Data'
    if not odelayDataPath.exists():
        odelayDataPath.mkdir()

    initFileName = expName + '_Index_ODELAYData.hdf5'
    expInitFilePath = dataPath / initFileName

    # Shared experiment parameters consumed by the per-ROI workers.
    expDictionary = {
        'backgroundImage': backgroundImage,
        'defaultFitRanges': np.array([0,0]),
        'maxObj': 5000,
        'numTimePoints': 320,   # number of timeponts
        'timerIncrement': 1800, # timer increment in seconds
        'threshold_offset': 1,
        'pixSize': pixSize,
        'sensorSize': np.array(backgroundImage.shape,dtype='int32'),
        'magnification': magnification,
        'coarseness': 25,
        'kernalerode': 3,
        'kernalopen': 8,
        'roiFiles': roiFiles,
        'experiment_name': expName,
        'odelayDataPath': str(odelayDataPath),
        'expInitFilePath': str(expInitFilePath)
    }
    # Persist the index so workers can retrieve shared parameters later.
    fio.saveDict(expInitFilePath, expDictionary)
    return expDictionary
def generateBackground(imagePath, roiList):
    '''
    Estimate the sensor background by averaging every tile of the first
    image stack of each ROI in roiList.  The running minimum is subtracted
    and the result returned as uint16.
    '''
    # ToDo: add in multicolor support for fluorescent images
    num_rois = len(roiList)

    # Probe the first stack to learn the tile dimensions.
    probe_path = imagePath / roiList[0] / (roiList[0] + '_1.mat')
    probe_data = fio.loadData(probe_path)
    stack_dims = probe_data['rawImage'].shape

    accume_image = np.zeros(stack_dims[0:2], dtype='float')
    image_divisor = float(num_rois * stack_dims[2])

    for roi_name in roiList:
        stack_path = imagePath / roi_name / (roi_name + '_1.mat')
        stack_data = fio.loadData(stack_path)
        for tile_dx in range(stack_dims[2]):
            # Accumulate each tile pre-scaled so the sum is the mean.
            accume_image += (1 / image_divisor) * stack_data['rawImage'][:, :, tile_dx].astype('float')

    accume_image -= np.min(accume_image)
    return accume_image.astype('uint16')
def roiProcess(imagepath, datapath, roiID, verbos = False):
    '''
    Process a single region of interest (ROI): stitch and threshold every
    timepoint image, track segmented objects across timepoints via FFT
    phase correlation and connected-component overlap, accumulate object
    areas/centroids, fit growth curves, and save the results.

    Parameters
    ----------
    imagepath : str or pathlib.Path
        Directory containing the per-ROI image folders.
    datapath : str or pathlib.Path
        Directory containing the experiment index file; results are
        written to its 'ODELAY Roi Data' subdirectory.
    roiID : str or int
        ROI label, or integer index into the experiment's roiFiles dict.
    verbos : bool
        Currently unused; retained for interface compatibility.

    Returns
    -------
    dict
        Tracking arrays, thresholds, histograms and growth-fit results.
    '''
    if isinstance(imagepath, str):
        imagePath = pathlib.Path(imagepath)
    else:
        imagePath = imagepath
    if isinstance(datapath, str):
        dataPath = pathlib.Path(datapath)
    else:
        dataPath = datapath

    # Locate the experiment index written by initializeExperiment.
    # NOTE(review): if zero or >1 matches, expIndexPath is undefined and
    # the next line raises NameError -- confirm intended failure mode.
    indexList = [k for k in dataPath.glob('*Index_ODELAYData.*')]
    if len(indexList)==1:
        expIndexPath = dataPath / indexList[0]
    else:
        print('Could not find the correct index file or there were more than one in the diretory')

    expData = fio.loadData(expIndexPath)
    #####################################
    # Load Dictionary variables  There has to be a way to dynamically add these
    #####################################
    background       = expData['backgroundImage']
    defaultFitRanges = expData['defaultFitRanges']
    maxObj           = expData['maxObj']
    numTimePoints    = expData['numTimePoints']  # number of timeponts
    timerIncrement   = expData['timerIncrement'] # timer increment in seconds
    threshold_offset = expData['threshold_offset']
    pixSize          = expData['pixSize']
    sensorSize       = expData['sensorSize']
    magnification    = expData['magnification']
    coarseness       = expData['coarseness']
    kernalerode      = expData['kernalerode']
    kernalopen       = expData['kernalopen']
    roiFiles         = expData['roiFiles']
    experiment_name  = expData['experiment_name']
    odelayDataPath   = dataPath / 'ODELAY Roi Data'

    ############################
    # expData dictionary is a hdf5 file that will contain the correct information
    # initialize the experiment.  Perhaps it should be an ini file but at the momement its not
    # defaultFitRanges = None
    # maxObj = 5000
    # numTimePoints = 320   # number of timeponts
    # timerIncrement = 1800 # timer increment in seconds
    # threshold_offset = 1
    # pixSize = 6.45
    # magnification = 20
    # courseness = 25
    # kernalerode = 3
    # kernalopen  = 8
    ############################

    # monitorData = fio.loadmat(monitorDataFile)
    # % Load Well Data
    # TODO: loadWell State for cronjob or monitor data files
    # Load state from Database or create one if it doesn't exist
    # Check number of images analyzed and number not analyzed
    # NewLoadImage +
    # LoadOldImage +
    # ThresholdOldImage +
    # ThresholdNewImage +
    # PhaseCorrelate Old New Evaluate SampleDrift +
    # BlobAnalysis +
    # Object Track -+
    # EnterData into ObjectNext and ObjectTrack Data -+
    # Estimate Growth curves -+
    # Save Low Bit Depth Image for display
    # Update well analysis
    # Shut down workers once caught up.

    '''
    The following code is to initialize data for all wells
    '''
    if isinstance(roiID, str):
        roiLabel = roiID
    elif isinstance(roiID, int):
        roiList = [*roiFiles]
        roiLabel = roiList[roiID]
    # Else this will crash

    roiPath = imagePath / roiLabel
    imageFileList = os.listdir(roiPath)
    # Understand this gem of a regular expression sort: natural-order sort
    # so that e.g. '_10' sorts after '_9'.
    imageFileList.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
    numImages = len(imageFileList)
    if numTimePoints<numImages:
        numTimePoints = numImages

    threshold = np.zeros(numTimePoints, dtype='uint16') # Array 1 x numTimePoints uint16
    # imageFileList = []# List of strings
    stitchMeta = {}  # Dictionary or list for image stitching data
    xyzTime = np.zeros((numTimePoints, 4), dtype ='float64')
    timePoints = np.full( numTimePoints, 'nan', dtype='float64') # Array dbl 1 x numTimePoints double
    objectNext  = np.zeros((maxObj, numTimePoints), dtype='uint16')  # Array maxObj x numTimePoints uint16
    objectTrack = np.zeros((maxObj, numTimePoints), dtype='uint16')  # Array maxObj x numTimePoints uint16
    objectArea  = np.zeros((maxObj, numTimePoints), dtype='uint32')  # Array maxObj x numTimePoints double
    objectCentX = np.zeros((maxObj, numTimePoints), dtype='float64') # Array maxObj x numTimePoints double
    objectCentY = np.zeros((maxObj, numTimePoints), dtype='float64') # Array maxObj x numTimePoints double
    numObj = np.zeros(numTimePoints, dtype = 'float64')
    sumArea = np.zeros( numTimePoints, dtype = 'float64')
    fitData = np.zeros((maxObj, 17), dtype='float64') # Dictionary array maxObj x 17 double
    imageHist = np.zeros((numTimePoints, 2**16), dtype = 'uint32')
    analyzeIndex = np.zeros(numTimePoints, dtype = 'bool')
    xyDisp = np.zeros((numTimePoints, 4), dtype = 'int32')
    prImage ={}
    # End Initialization

    # processTime = np.zeros()
    tstart = time.time()
    print(f'The ROI is {roiID}')

    # Start Processing Data Here
    for aI in range(numImages):
        t0 = time.time()
        # load New Image
        imageFilePath = roiPath / imageFileList[aI]
        anImage = stitchImage(imageFilePath, pixSize, magnification, background)
        #TODO: Generate a thumbnail of the stitched image for use in the GUI later
        stitchMeta.update({f'{aI:03d}': anImage['stitchMeta']})
        xyzTime[aI,:] = anImage['stitchMeta']['xyzTime'][0:4]
        xyDim = anImage['Bf'].shape
        # Edge-gradient images used for thresholding and registration.
        sobelBf   = SobelGradient(anImage['Bf'])
        sobelCent = SobelGradient(anImage['centIm'])
        threshold[aI] = thresholdImage(sobelBf, threshold_offset, coarseness)
        imageHist[aI,:] = histogram1d(sobelBf.ravel(), 2**16, [0,2**16], weights = None).astype('uint32')
        bwBf = np.greater(sobelBf, threshold[aI]).astype('uint8')
        # NOTE(review): akernel is only used by the commented-out OpenCV
        # sequence below; the live code uses ekernel/okernel.
        akernel = np.array([[0, 0, 1, 0, 0],
                            [0, 1, 1, 1, 0],
                            [1, 1, 1, 1, 1],
                            [0, 1, 1, 1, 0],
                            [0, 0, 1, 0, 0]], dtype='uint8')
        # dilate
        # fill
        # erode
        # open
        # bwBf = cv2.dilate(bwBf, akernel, iterations = 1)
        # okernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalopen , kernalopen))
        # bwBf = cv2.morphologyEx(bwBf, cv2.MORPH_CLOSE,okernel)
        # bwBf = cv2.erode( bwBf, akernel, iterations = 1)
        # bwBf = cv2.morphologyEx(bwBf, cv2.MORPH_OPEN, okernel)
        #######
        # Python Implementation: dilate/erode then open/close to clean the
        # binary colony mask.
        ekernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalerode, kernalerode))
        okernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalopen , kernalopen))
        bwBf = cv2.dilate(bwBf, ekernel, iterations = 1)
        bwBf = cv2.erode( bwBf, ekernel, iterations = 1)
        bwBf = cv2.morphologyEx(bwBf, cv2.MORPH_OPEN, okernel)
        bwBf = cv2.morphologyEx(bwBf, cv2.MORPH_CLOSE,okernel)
        # Force a one-pixel border so border-touching blobs merge into one
        # background-connected component.
        bwBf[1, :] = 1
        bwBf[:, 1] = 1
        bwBf[:,-1] = 1
        bwBf[-1,:] = 1
        sumArea[aI] = np.sum(bwBf)
        anImage['sobelBf'] = sobelBf
        anImage['bwBf'] = bwBf
        imageStats = cv2.connectedComponentsWithStats(bwBf, 8, cv2.CV_32S)
        # imageStats[0] is the number of objects detected
        # imageStats[1] is the labeled image uint32
        # imageStats[2] is a number of objects x 5 List that are object stats
        # imageStats[3] is object centroids
        # TODO: Extract Fluorescence data from Fluoresences image
        # This will be done either using the threshold areas in the
        # labeled Image to extract corresponding areas in the
        # fluoresence image and then summing those areas
        if aI != 0:
            # Centroid Association
            # Figure out what the image shift is from the previous Images
            bw1 = np.greater(sobelCent, threshold[aI]).astype('uint8')
            bw2 = np.greater(prImage['sobelCent'], threshold[aI]).astype('uint8')
            # Use FFT phase corelation to determin the offet
            fT = np.multiply(anImage['fTrans'], prImage['fTrans'].conj())
            fTabs = np.divide(fT,abs(fT))
            fmag1 = np.fft.ifft2(fTabs)
            fmag1[0,0] = 0  # The first index of fmag is always 1 so ignor it.
            r, c = np.where(fmag1 == fmag1.max())
            xyDim = anImage['centIm'].shape
            # Candidate shifts: the peak and its wrap-around complement.
            row = [xyDim[0]-r[0], r[0]]
            col = [xyDim[1]-c[0], c[0]]
            # Enumerate all 16 sign/assignment combinations of the shift.
            rDisp = np.zeros((16,3), dtype = 'int32')
            cDisp = np.zeros((16,3), dtype = 'int32')
            cnt  = 0
            for r in row:
                for c in col:
                    rDisp[cnt,:] = [r,0,r]
                    cDisp[cnt,:] = [c,0,c]
                    cnt += 1
                    rDisp[cnt,:] = [0,r,r]
                    cDisp[cnt,:] = [0,c,c]
                    cnt += 1
                    rDisp[cnt,:] = [r,0,r]
                    cDisp[cnt,:] = [0,c,c]
                    cnt += 1
                    rDisp[cnt,:] = [0,r,r]
                    cDisp[cnt,:] = [c,0,c]
                    cnt += 1
            # Score each candidate by the overlap of the two binary images.
            cond = np.zeros(16,dtype = 'int32')
            for n in range(16):
                sw1 = np.zeros((xyDim[0] + rDisp[n,2] , xyDim[1] + cDisp[n,2]), dtype = 'uint8')
                sw2 = np.zeros((xyDim[0] + rDisp[n,2] , xyDim[1] + cDisp[n,2]), dtype = 'uint8')
                swT = np.zeros((xyDim[0] + rDisp[n,2] , xyDim[1] + cDisp[n,2]), dtype = 'uint8')
                rs1 = rDisp[n,0]
                re1 = rDisp[n,0] + xyDim[0]
                cs1 = cDisp[n,0]
                ce1 = cDisp[n,0] + xyDim[1]
                rs2= rDisp[n,1]
                re2= rDisp[n,1] + xyDim[0]
                cs2= cDisp[n,1]
                ce2= cDisp[n,1] + xyDim[1]
                sw1[rs1:re1, cs1:ce1] = bw1
                sw2[rs2:re2, cs2:ce2] = bw2
                swT = sw1*sw2
                cond[n] = swT.sum(axis = None, dtype = 'float')
            ind = cond.argmax()
            xyDisp[aI,:] = np.array((rDisp[ind,0],cDisp[ind,0],rDisp[ind,1],cDisp[ind,1]), dtype = 'int32')
            # this gives the overlap vector for aligning the images
            # Set image Dimensions so they are identical.
            xyDim  = bwBf.shape
            xyDimP = prImage['bwBf'].shape
            maxDim = np.max([xyDim, xyDimP],axis = 0)
            maxDisp = np.array((xyDisp[aI,[0,2]].max(), xyDisp[aI,[1,3]].max()),dtype = 'int32')
            # To do include translation from images earlier.
            alDim = np.floor((maxDim-xyDim)/2).astype('int')
            auDim = maxDim-np.ceil((maxDim-xyDim)/2).astype('int')
            blDim = np.floor((maxDim-xyDimP)/2).astype('int')
            buDim = maxDim-np.ceil((maxDim-xyDimP)/2).astype('int')
            arsV = alDim[0] + xyDisp[aI,0]
            areV = auDim[0] + xyDisp[aI,0]
            acsV = alDim[1] + xyDisp[aI,1]
            aceV = auDim[1] + xyDisp[aI,1]
            brsV = blDim[0] + xyDisp[aI,2]
            breV = buDim[0] + xyDisp[aI,2]
            bcsV = blDim[1] + xyDisp[aI,3]
            bceV = buDim[1] + xyDisp[aI,3]
            # Paste current and previous masks/labels into common frames.
            A = np.zeros((maxDim + maxDisp),dtype = 'uint8')
            B = np.zeros((maxDim + maxDisp),dtype = 'uint8')
            aLbl = np.zeros((maxDim + maxDisp),dtype = 'uint16')
            bLbl = np.zeros((maxDim + maxDisp),dtype = 'uint16')
            A[arsV:areV,acsV:aceV] = bwBf
            B[brsV:breV,bcsV:bceV] = prImage['bwBf']
            aLbl[arsV:areV,acsV:aceV] = imageStats[1]
            bLbl[brsV:breV,bcsV:bceV] = prevImStats[1]
            # % Multiply black and white Images together.  This makes a mask
            # % where colonies overlap.
            M = A*B
            ALbl = aLbl*M # Current Labeled Image
            BLbl = bLbl*M # Prev Labeled Image
            ccM = cv2.connectedComponents(M, 8, cv2.CV_32S)
            numObj[aI] = ccM[0]
            if ccM[0] >5000:
                print('Number of objectes in ', aI, ' greater than 5000')
            # ccM is the total number of objects returned in the image
            ARvl = ALbl.ravel()
            BRvl = BLbl.ravel()
            MRvl = ccM[1].ravel()
            # Create a sparce matrix of the labeled connected component image
            smM = csr_matrix((MRvl, [MRvl, np.arange(MRvl.shape[0])] ),
                             shape=(ccM[0],MRvl.shape[0]))
            # Get the indices of the non-zero elements of the connected
            # connected components.  Use a list comprehension and
            # np.split to find the indicies of each labled area in the ccM
            # matrix.  Then make sure that the lables of ALbl and BLbl are
            # unique by taking the absolute value of the difference between
            # all the Labeled pixels and summing them.  If all pixels are
            # are identical then that function diffsum should return zero.
            # If both Labels in each image are unique then no merging of
            # overlaping objects has occured.
            trkInds = np.array(([
                        [ARvl[inds[0]], BRvl[inds[0]]]
                        for inds in np.split(smM.indices, smM.indptr[1:-1])
                        if diffsum(ARvl[inds])==0 and diffsum(BRvl[inds])==0
                        ]), dtype = 'int')
            # Place objects that were linked in the Object Next list into an easier to
            # address Object Track List.
            # NOTE(review): np.max raises ValueError when trkInds is empty
            # (no unambiguous links) -- TODO confirm that case cannot occur.
            if np.max(trkInds)>=5000:
                tempInds = trkInds>4999
                trkInds[tempInds] = 0
            objectNext[trkInds[:,1],aI-1] = trkInds[:,0]
            rc = objectNext.shape
            # Discard ambiguous links: any previous object claimed by more
            # than one current object is zeroed out.
            nextHist = histogram1d(objectNext[:,aI-1],rc[0],[0,rc[0]],weights = None).astype('int')
            discard = np.where(nextHist>1)
            for val in discard[0]:
                inds = np.where(objectNext[:,aI-1]==val)
                objectNext[inds,aI-1] = 0
            curInds = np.arange(maxObj, dtype = 'int')
            curVec  = curInds[objectTrack[:,aI-1]!=0]
            nextVec = objectTrack[curVec,aI-1]
            # NOTE(review): nextVec.shape is a tuple, so `!= 0` is always
            # True; likely intended nextVec.shape[0] != 0 (or nextVec.size).
            if nextVec.shape != 0:
                objectTrack[curVec,aI] = objectNext[nextVec,aI-1]
            curVec = curInds[objectTrack[:,aI]!=0]
            objVec = objectTrack[curVec,aI]
            objectArea[ curVec, aI] = imageStats[2][objVec,4]
            objectCentX[curVec, aI] = imageStats[3][objVec,0]
            objectCentY[curVec, aI] = imageStats[3][objVec,1]
            # Generate Timepoints for this Data-Set
            timePoints[aI] = (xyzTime[aI,3]-xyzTime[0,3])*1440 # Matlab stores data in fractional number of days.  Convert to minutes number of minutes in a day
        elif aI == 0:
            # First image: every detected object starts its own track.
            curVec = np.arange(imageStats[0], dtype = 'int')
            timePoints[aI]  = 0
            objectTrack[0:imageStats[0],0] = np.arange(imageStats[0], dtype = 'uint16')
            objectArea[ curVec, aI] = imageStats[2][curVec,4]
            objectCentX[curVec, aI] = imageStats[3][curVec,0]
            objectCentY[curVec, aI] = imageStats[3][curVec,1]
        # set up for next Image by replacing the previous image information
        prImage = anImage
        prImage['sobelCent'] = sobelCent
        prevImStats = imageStats
        t1 = time.time()
        print('Image ', aI, ' took ', t1-t0, ' seconds')
        print((t1-tstart)/60, ' minutes have elapsed')
        # breakpoint()

    # This is a filter to get rid of very big stpes in the objectArea that
    # may be due to either loss of focus or other imaging problems
    log2Area = np.log2(objectArea.astype('float'))
    diffArea = np.diff(log2Area,axis=1,n=1, append=0)
    diffAreaAbs = np.abs( diffArea)
    dbInds = diffAreaAbs>1
    bgSteps = np.cumsum(dbInds,axis=1)==0
    objectArea[~bgSteps]= 0

    # Only fit growth curves for objects observed in at least 6 timepoints.
    indVec = np.arange(maxObj)
    numObs = np.sum(objectArea!=0, axis = 1)
    fitVec = indVec[numObs>5]
    for m in fitVec:
        (fitCols, fitData[m,0:16]) = fitGrowthCurves(timePoints, objectArea[m,:],defaultFitRanges)
    if len(fitVec)==0:
        fitCols = {'No Data Fit':1}

    # returnDict = {'anImage':     anImage,
    #               'prImage':     prImage,
    #               'background':  background,
    #               'stitchMeta':  stitchMeta,
    #               'imageHist':   imageHist,
    #               'timePoints':  timePoints,
    #               'objectArea':  objectArea,
    #               'objectTrack': objectTrack,
    #               'objectCentX': objectCentX,
    #               'objectCentY': objectCentY,
    #               'objectNext':  objectNext,
    #               'threshold':   threshold,
    #               'numObj':      numObj,
    #               'sumArea':     sumArea,
    #               'xyDisp':      xyDisp,
    #               'xyzTime':     xyzTime,
    #               'fitData':     fitData,
    #               'roiLabel':    roiLabel
    #               }
    returnDict = {'stitchMeta':  stitchMeta,
                  'imageHist':   imageHist,
                  'timePoints':  timePoints,
                  'objectArea':  objectArea,
                  'objectTrack': objectTrack,
                  'objectCentX': objectCentX,
                  'objectCentY': objectCentY,
                  'objectNext':  objectNext,
                  'threshold':   threshold,
                  'sumArea':     sumArea,
                  'numObj':      numObj,
                  'xyDisp':      xyDisp,
                  'xyzTime':     xyzTime,
                  'fitData':     fitData,
                  'fitDataCols': fitCols,
                  'roiLabel':    roiLabel
                  }
    fio.saveROI(odelayDataPath, returnDict)
    return returnDict
def roiMacInfo(imagepath, datapath, roiID, verbos = False):
    '''
    Collect per-timepoint fluorescence object statistics for one ROI:
    threshold each non-brightfield channel, run connected components, and
    record thresholds, bounding boxes, centroids and integrated object
    intensities.  Results are saved to '<roiID>.hdf5' and returned.
    '''
    if isinstance(imagepath, str):
        imagePath = pathlib.Path(imagepath)
    else:
        imagePath = imagepath
    if isinstance(datapath, str):
        dataPath = pathlib.Path(datapath)
    else:
        dataPath = datapath

    # Locate the experiment index written by initializeExperiment.
    # NOTE(review): when not exactly one match, expIndexPath is undefined
    # and the following load raises NameError.
    indexList = [k for k in dataPath.glob('*Index_ODELAYData.*')]
    if len(indexList)==1:
        expIndexPath = dataPath / indexList[0]
    else:
        print('Could not find the correct index file or there were more than one in the diretory')

    expData = fio.loadData(expIndexPath)
    #####################################
    # Load Dictionary variables  There has to be a way to dynamically add these
    #####################################
    background       = expData['backgroundImage']
    defaultFitRanges = expData['defaultFitRanges']
    maxObj           = expData['maxObj']
    numTimePoints    = expData['numTimePoints']  # number of timeponts
    timerIncrement   = expData['timerIncrement'] # timer increment in seconds
    threshold_offset = expData['threshold_offset']
    pixSize          = expData['pixSize']
    sensorSize       = expData['sensorSize']
    magnification    = expData['magnification']
    coarseness       = expData['coarseness']
    kernalerode      = expData['kernalerode']
    kernalopen       = expData['kernalopen']
    roiFiles         = expData['roiFiles']
    experiment_name  = expData['experiment_name']
    roiSavePath      = dataPath / 'ODELAY Roi Data' / f'{roiID}.hdf5'

    '''
    The following code is to initialize data for all wells
    '''
    roiPath = imagePath / roiID
    fileList = os.listdir(roiPath)
    imageFileList = [fileName for fileName in fileList if '.mat' in fileName]
    # Understand this gem of a regular expression sort: natural-order sort
    # so that e.g. '_10' sorts after '_9'.
    imageFileList.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
    numImages = len(imageFileList)
    if numTimePoints<numImages:
        numTimePoints = numImages

    imageInfo = {}
    # Start Processing Data Here
    for aI in range(numImages):
        # # load New Image
        imageFilePath = roiPath / imageFileList[aI]
        anImage = stitchImage(imageFilePath, pixSize, magnification, background)
        # TODO: Generate a thumbnail of the stitched image for use in the GUI later
        imageInfo[f'{aI:03d}'] = {}
        imageInfo[f'{aI:03d}']['stitchMeta'] = anImage['stitchMeta']
        imageInfo[f'{aI:03d}']['index'] = aI+1
        # for imType in anImage['imageLabels'].keys()
        # flourImageDict = {colList[val] : val for val in range(len(colList))}
        # Every channel except brightfield ('Bf') is treated as fluorescence.
        fluorImageList = [Lbl for Lbl in [*anImage['imageLabels']] if not Lbl=='Bf']
        flourDict ={fluorImageList[im]: im for im in range(len(fluorImageList))}
        for flourIm in fluorImageList:
            threshold = thresholdImage(anImage[flourIm], threshold_offset, coarseness)
            flourBw = morphImage(anImage[flourIm], kernalerode, kernalopen, threshold)
            imageStats = cv2.connectedComponentsWithStats(flourBw, 8, cv2.CV_32S)
            FRvl = anImage[flourIm].ravel()
            MRvl = imageStats[1].ravel()
            # Create a sparce matrix of the labeled connected component image
            smM = csr_matrix((MRvl, [MRvl, np.arange(MRvl.shape[0])]),
                             shape=(imageStats[0],MRvl.shape[0]))
            # Integrated fluorescence intensity of each labeled object.
            objIntensity = np.array(([
                        np.sum(FRvl[inds])
                        for inds in np.split(smM.indices, smM.indptr[1:-1])
                        ]), dtype = 'uint32')
            imageInfo[f'{aI:03d}'][flourIm] = {}
            imageInfo[f'{aI:03d}'][flourIm]['threshold'] = threshold
            imageInfo[f'{aI:03d}'][flourIm]['boundBox'] = imageStats[2]
            imageInfo[f'{aI:03d}'][flourIm]['centroids'] = imageStats[3]
            imageInfo[f'{aI:03d}'][flourIm]['objIntensity'] = objIntensity

    fio.saveDict(roiSavePath, imageInfo)
    return imageInfo
def roiMacSeg(imagepath, datapath, roiID, verbos = False):
    '''
    Segment macrophage images for one ROI: threshold the brightfield
    gradient image, detect fluorescent (DAPI / Cy5) object centroids,
    classify each brightfield bounding box as a live cell (DAPI only) or a
    dead cell (DAPI + Cy5), and write a cropped .tiff of every classified
    cell into the DeadCells / LiveCells directories under datapath.

    Parameters
    ----------
    imagepath, datapath : str or pathlib.Path
        Image and data directories, as for roiProcess.
    roiID : str or int
        ROI label, or integer index into the experiment's roiFiles dict.
    verbos : bool
        Currently unused; retained for interface compatibility.

    Returns
    -------
    dict
        Per-timepoint thresholds, bounding boxes, centroids, intensities
        and live/dead classification indices (also saved to hdf5).
    '''
    if isinstance(imagepath, str):
        imagePath = pathlib.Path(imagepath)
    else:
        imagePath = imagepath
    if isinstance(datapath, str):
        dataPath = pathlib.Path(datapath)
    else:
        dataPath = datapath

    # Locate the experiment index written by initializeExperiment.
    indexList = [k for k in dataPath.glob('*Index_ODELAYData.*')]
    if len(indexList)==1:
        expIndexPath = dataPath / indexList[0]
    else:
        print('Could not find the correct index file or there were more than one in the diretory')

    # Output directories for the cropped cell images.
    deadDirPath = dataPath / 'DeadCells'
    if not deadDirPath.exists():
        deadDirPath.mkdir()
    liveDirPath = dataPath / 'LiveCells'
    if not liveDirPath.exists():
        liveDirPath.mkdir()

    expData = fio.loadData(expIndexPath)
    #####################################
    # Load Dictionary variables  There has to be a way to dynamically add these
    #####################################
    background       = expData['backgroundImage']
    defaultFitRanges = expData['defaultFitRanges']
    maxObj           = expData['maxObj']
    numTimePoints    = expData['numTimePoints']  # number of timeponts
    timerIncrement   = expData['timerIncrement'] # timer increment in seconds
    threshold_offset = expData['threshold_offset']
    pixSize          = expData['pixSize']
    sensorSize       = expData['sensorSize']
    magnification    = expData['magnification']
    coarseness       = expData['coarseness']
    kernalerode      = expData['kernalerode']
    kernalopen       = expData['kernalopen']
    roiFiles         = expData['roiFiles']
    experiment_name  = expData['experiment_name']
    roiSavePath      = dataPath / 'ODELAY Roi Data' / f'{roiID}.hdf5'

    if isinstance(roiID, str):
        roiLabel = roiID
    elif isinstance(roiID, int):
        roiList = [*roiFiles]
        roiLabel = roiList[roiID]
    # Else this will crash

    roiPath = imagePath / roiLabel
    imageFileList = os.listdir(roiPath)
    # Natural-order sort so that e.g. '_10' sorts after '_9'.
    imageFileList.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
    numImages = len(imageFileList)
    if numTimePoints<numImages:
        numTimePoints = numImages

    # NOTE(review): most of these arrays are initialized for parity with
    # roiProcess but only imageHist/sumArea are filled in this function.
    threshold = np.zeros(numTimePoints, dtype='uint16') # Array 1 x numTimePoints uint16
    stitchMeta = {}  # Dictionary or list for image stitching data
    xyzTime = np.zeros((numTimePoints, 4), dtype ='float64')
    timePoints = np.full( numTimePoints, 'nan', dtype='float64') # Array dbl 1 x numTimePoints double
    numObj = np.zeros(numTimePoints, dtype = 'float64')
    sumArea = np.zeros( numTimePoints, dtype = 'float64')
    fitData = np.zeros((maxObj, 17), dtype='float64') # Dictionary array maxObj x 17 double
    imageHist = np.zeros((numTimePoints, 2**16), dtype = 'uint32')
    analyzeIndex = np.zeros(numTimePoints, dtype = 'bool')
    xyDisp = np.zeros((numTimePoints, 4), dtype = 'int32')
    prImage ={}

    '''
    The following code is to initialize data for all wells
    '''
    roiPath = imagePath / roiID
    fileList = os.listdir(roiPath)
    imageFileList = [fileName for fileName in fileList if '.mat' in fileName]
    imageFileList.sort(key=lambda var:[int(x) if x.isdigit() else x for x in re.findall(r'[^0-9]|[0-9]+', var)])
    numImages = len(imageFileList)
    # NOTE(review): this caps the loop at the number of available images
    # (the opposite comparison from roiProcess) -- confirm intended.
    if numTimePoints>numImages:
        numTimePoints = numImages

    imageInfo = {}
    liveCnt = 0
    deadCnt = 0
    # Start Processing Data Here
    for aI in range(numTimePoints):
        # load New Image
        # NOTE(review): the 'opl' module alias is not imported in this
        # file's visible header -- confirm it is available at runtime.
        imageFilePath = roiPath / imageFileList[aI]
        anImage = opl.stitchImage(imageFilePath, pixSize, magnification, background)
        # TODO: Generate a thumbnail of the stitched image for use in the GUI later
        imageInfo[f'{aI:03d}'] = {}
        imageInfo[f'{aI:03d}']['stitchMeta'] = anImage['stitchMeta']
        imageInfo[f'{aI:03d}']['index'] = aI+1

        # Threshold the brightfield gradient image and clean it up with
        # dilate/erode; the image border is forced on so border-touching
        # blobs merge into one component.
        sobelBf = opl.SobelGradient(anImage['Bf'])
        threshold = opl.thresholdImage(sobelBf, 1.2, coarseness)
        imageHist[aI,:] = histogram1d(sobelBf.ravel(), 2**16, [0,2**16], weights = None).astype('uint32')
        bwBf1 = np.greater(sobelBf, threshold).astype('uint8')

        kernalerode = 4
        kernalopen  = 3
        ekernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalerode, kernalerode))
        okernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (kernalopen , kernalopen))
        bwBf2 = cv2.dilate(bwBf1, ekernel, iterations = 2)
        bwBf3 = cv2.erode( bwBf2, ekernel, iterations = 2)
        bwBf3[1, :] = 1
        bwBf3[:, 1] = 1
        bwBf3[:,-1] = 1
        bwBf3[-1,:] = 1
        sumArea[aI] = np.sum(bwBf3)
        anImage['bwBf'] = bwBf2
        bfImageStats = cv2.connectedComponentsWithStats(bwBf2, 8, cv2.CV_32S)

        scaledBf  = scaleImage(anImage['Bf'], lowCut = 0.00001, highcut = 0.9995, scaleImage = 1)
        scaledSB  = scaleImage(sobelBf,       lowCut = 0.00001, highcut = 0.9995, scaleImage = 1)
        scaledCy5 = scaleImage(anImage['Cy5'],   lowCut = 0.00001, highcut = 1, scaleImage = 1)
        scaledHst = scaleImage(anImage['DAPI'],  lowCut = 0.00001, highcut = 1, scaleImage = 1)
        images = [scaledBf,scaledSB, anImage['bwBf'], scaledCy5, scaledHst]
        titles = ['scaledBF','scaledSB', "anImage['bwBf']", 'scaledCy5', 'scaledHst']
        # for i in range(5):
        #     plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
        #     plt.title(titles[i])
        #     plt.xticks([]),plt.yticks([])
        # plt.show()

        imageInfo[f'{aI:03d}']['Bf'] = {}
        imageInfo[f'{aI:03d}']['Bf']['threshold'] = threshold
        imageInfo[f'{aI:03d}']['Bf']['boundBox'] = bfImageStats[2]  # upper left xy lower right xy
        imageInfo[f'{aI:03d}']['Bf']['centroids'] = bfImageStats[3]

        # Every channel except brightfield is treated as fluorescence.
        fluorImageList = [Lbl for Lbl in [*anImage['imageLabels']] if not Lbl=='Bf']
        flourDict ={fluorImageList[im]: im for im in range(len(fluorImageList))}
        for flourIm in fluorImageList:
            threshold = opl.thresholdImage(anImage[flourIm], 1.3, coarseness)
            flourBw = opl.morphImage(anImage[flourIm], kernalerode, kernalopen, threshold)
            flImageStats = cv2.connectedComponentsWithStats(flourBw, 8, cv2.CV_32S)
            FRvl = anImage[flourIm].ravel()
            MRvl = flImageStats[1].ravel()
            # Create a sparce matrix of the labeled connected component image
            fluorPix = csr_matrix((MRvl, [MRvl, np.arange(MRvl.shape[0])]),
                                  shape=(flImageStats[0],MRvl.shape[0]))
            # Integrated intensity of each labeled fluorescent object.
            objIntensity = np.array(([
                        np.sum(FRvl[inds])
                        for inds in np.split(fluorPix.indices, fluorPix.indptr[1:-1])
                        ]), dtype = 'uint32')
            imageInfo[f'{aI:03d}'][flourIm] = {}
            imageInfo[f'{aI:03d}'][flourIm]['threshold'] = threshold
            imageInfo[f'{aI:03d}'][flourIm]['boundBox'] = flImageStats[2]
            imageInfo[f'{aI:03d}'][flourIm]['centroids'] = flImageStats[3]
            imageInfo[f'{aI:03d}'][flourIm]['objIntensity'] = objIntensity

            # figure out if image has fluorescent centroid in image.
            imToCheck = flourIm
            flCents = imageInfo[f'{aI:03d}'][flourIm]['centroids']
            cellBounds = imageInfo[f'{aI:03d}']['Bf']['boundBox']
            centIn = np.zeros((flCents.shape[0], cellBounds.shape[0]), dtype = 'bool')
            boundIn= np.zeros((flCents.shape[0], cellBounds.shape[0]), dtype = 'bool')
            for row in range(flCents.shape[0]):
                centIn[row,:] = checkCentroid(flCents[row,:], cellBounds, 40, 500)
            for col in range(cellBounds.shape[0]):
                boundIn[:,col] = checkBoundBox(flCents, cellBounds[col,:], 40, 500)
            imageInfo[f'{aI:03d}'][flourIm]['centIn'] = centIn
            imageInfo[f'{aI:03d}'][flourIm]['boundIn'] = boundIn

        # A box with exactly one DAPI centroid is a cell; the presence of
        # exactly one Cy5 centroid marks it dead, otherwise live.
        dapiCents = np.sum(imageInfo[f'{aI:03d}']['DAPI']['centIn'], axis=0)
        cy5Cents  = np.sum(imageInfo[f'{aI:03d}']['Cy5']['centIn'],  axis=0)
        singleDapi = dapiCents == 1
        singleCy5  = cy5Cents  == 1
        deadCell = singleDapi &  singleCy5
        liveCell = singleDapi & ~singleCy5
        deadInds = np.where(deadCell==True)
        liveInds = np.where(liveCell==True)
        if type(deadInds[0]) is not tuple and type(liveInds[0]) is not tuple:
            imageInfo[f'{aI:03d}']['deadCellInds'] = deadInds[0]
            imageInfo[f'{aI:03d}']['liveCellInds'] = liveInds[0]
            deadCnt += deadInds[0].shape[0]
            liveCnt += liveInds[0].shape[0]

        # BUG FIX: the original called np.uinique (which does not exist and
        # raised AttributeError); np.unique is intended.  The unused
        # uniqueDead/uniqueLive temporaries were removed.
        for ind in np.unique(deadInds[0]):
            deadImagePath = deadDirPath / f'{roiID}_{aI:03d}_{ind}.tiff'
            bBox = cellBounds[ind,:]
            xi = bBox[0]
            xe = bBox[0]+bBox[2]
            yi = bBox[1]
            ye = bBox[1]+bBox[3]
            saveIm = anImage['Bf'][yi:ye, xi:xe]
            retVal = cv2.imwrite(str(deadImagePath), saveIm)

        for ind in np.unique(liveInds[0]):
            liveImagePath = liveDirPath / f'{roiID}_{aI:03d}_{ind}.tiff'
            bBox = cellBounds[ind,:]
            xi = bBox[0]
            xe = bBox[0]+bBox[2]
            yi = bBox[1]
            ye = bBox[1]+bBox[3]
            saveIm = anImage['Bf'][yi:ye, xi:xe]
            retVal = cv2.imwrite(str(liveImagePath), saveIm)

    fio.saveDict(roiSavePath, imageInfo)
    return imageInfo
def checkCentroid(cent, bB, minDim, maxDim):
    """Return a boolean vector flagging which bounding boxes contain `cent`.

    Parameters
    ----------
    cent : array-like, shape (2,)
        Centroid (x, y) position.
    bB : ndarray, shape (n, 4)
        Bounding boxes as rows of (x, y, width, height).
    minDim, maxDim : scalar
        Width and height limits a box must satisfy to count as a hit.

    Returns
    -------
    ndarray of bool, shape (n,)
        True where the centroid falls inside a box whose width and height
        both lie within [minDim, maxDim].
    """
    left, top = bB[:, 0], bB[:, 1]
    right = left + bB[:, 2]
    bottom = top + bB[:, 3]
    # Centroid containment test, vectorized over all boxes.
    inside = (left <= cent[0]) & (right >= cent[0]) \
           & (top <= cent[1]) & (bottom >= cent[1])
    # Box size sanity filter.
    size_ok = (bB[:, 2] >= minDim) & (bB[:, 3] >= minDim) \
            & (bB[:, 2] <= maxDim) & (bB[:, 3] <= maxDim)
    return inside & size_ok
def checkBoundBox(cent, bB, minDim, maxDim):
    """Return a boolean vector flagging which centroids fall inside one box.

    Parameters
    ----------
    cent : ndarray, shape (n, 2)
        Centroid (x, y) positions, one per row.
    bB : array-like, shape (4,)
        A single bounding box (x, y, width, height).
    minDim, maxDim : scalar
        Width/height limits the box must satisfy for any hit to count.

    Returns
    -------
    ndarray of bool, shape (n,)
        True for centroids inside the box, provided the box width and
        height both lie within [minDim, maxDim]; all False otherwise.
    """
    left, top = bB[0], bB[1]
    right, bottom = left + bB[2], top + bB[3]
    # Containment test vectorized over all centroids.
    inside = (left <= cent[:, 0]) & (right >= cent[:, 0]) \
           & (top <= cent[:, 1]) & (bottom >= cent[:, 1])
    # Scalar size filter broadcast against the containment vector.
    size_ok = (bB[2] >= minDim) & (bB[3] >= minDim) \
            & (bB[2] <= maxDim) & (bB[3] <= maxDim)
    return inside & size_ok
def refitGCs(imagepath, datapath, roiID):
    """Placeholder for re-fitting growth curves for a given ROI.

    Not implemented yet; always returns None.
    """
    return None
def gompMinBDt(x, tdata, idata):
    """Sum-of-squared-residuals objective for the parameterized Gompertz model.

    Parameters
    ----------
    x : array-like, shape (4,)
        Parameters (a, B, tlag, dT): baseline, amplitude, lag time, and
        time between lag and maximum-velocity point.
    tdata : ndarray
        Time points.
    idata : ndarray
        Observed intensities/areas at `tdata`.

    Returns
    -------
    float
        nansum of squared residuals between the model and `idata`.
    """
    a, b, tlag, dT = x[0], x[1], x[2], x[3]
    # Lag constant K = ln((3+sqrt(5))/2) of the parameterized Gompertz form.
    k_lag = np.log((3 + 5**0.5) / 2)
    model = a + b * np.exp(-np.exp((k_lag / dT) * (dT + tlag - tdata)))
    return np.nansum((model - idata)**2)
def gompBDt(x, tdata):
    """Evaluate the parameterized Gompertz model at the given time points.

    Parameters
    ----------
    x : array-like, shape (4,)
        Parameters (a, B, tlag, dT): baseline, amplitude, lag time, and
        time between lag and maximum-velocity point.
    tdata : ndarray
        Time points at which to evaluate the model.

    Returns
    -------
    ndarray
        Model values a + B * exp(-exp((K/dT) * (dT + tlag - t))).
    """
    a, b, tlag, dT = x[0], x[1], x[2], x[3]
    # Lag constant K = ln((3+sqrt(5))/2) of the parameterized Gompertz form.
    k_lag = np.log((3 + 5**0.5) / 2)
    return a + b * np.exp(-np.exp((k_lag / dT) * (dT + tlag - tdata)))
def findPrmsGompBDt(vecB, vecTlag, vecDT, tdata, adata):
    """Coarse-grid search for parameters of the parameterized Gompertz function.

    Parameters
    ----------
    vecB : ndarray
        Candidate B (amplitude) values to search.
    vecTlag : ndarray
        Candidate lag times; dT candidates are taken from pairwise
        differences of this vector.
    vecDT : ndarray
        Candidate dT values (NOTE: currently unused, kept for interface
        compatibility).
    tdata : ndarray
        Ordered array of time points.
    adata : ndarray
        Corresponding area data.

    Returns
    -------
    ndarray, shape (4,)
        Estimated parameters: [a, B, tlag, dT], where a is fixed to the
        mean of the first five data points and the rest minimize the SSE.
    """
    est = np.zeros(4, dtype='float')
    # Baseline a is pinned to the early-time mean, not searched.
    est[0] = np.nanmean(adata[0:5])
    k_lag = np.log((3 + 5**0.5) / 2)
    best_sse = None
    n_lag = vecTlag.shape[0]
    for B in vecB:
        for idx in range(n_lag - 1):
            tlag = vecTlag[idx]
            # dT candidates: gaps from this lag time to every later one.
            for dT in vecTlag[idx + 1:] - vecTlag[idx]:
                model = est[0] + B * np.exp(-np.exp((k_lag / dT) * (dT + tlag - tdata)))
                sse = np.sum((adata - model)**2)
                if best_sse is None or sse < best_sse:
                    best_sse = sse
                    est[1], est[2], est[3] = B, tlag, dT
    return est
def fitGrowthCurves(timeVec, areaData, defaultRanges):
numTPs = np.sum(areaData!=0)
aData = np.log2(areaData[:numTPs])
tData = timeVec[:numTPs]
Nsteps = 40
areaMax = np.max(aData)
factor=1.05
cumsum = np.cumsum(np.insert(aData, 0, 0))
smthArea = (cumsum[5:] - cumsum[:-5]) / 5
x = tData[:-4]
y = smthArea
m = | np.diff(y[[0,-1]]) | numpy.diff |
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 8 14:12:07 2019
@Title: FrontierLab exchange program - BPS source code (for Bayesian Logistic Regression)
@Author: <NAME>
@Reference: <NAME>, <NAME>, and <NAME>, “The bouncy particle sampler: A non-reversible rejection-free markov chain monte carlo method,” 2015.
"""
import time
import numpy as np
import copy
def expit(z):
    """Numerically stable logistic sigmoid, exp(z) / (1 + exp(z)).

    Algebraically identical to the naive form, but never overflows:
    for z >= 0 it evaluates 1 / (1 + exp(-z)) and for z < 0 it
    evaluates exp(z) / (1 + exp(z)), so every exponential has a
    non-positive argument. The naive exp(z) / (1 + exp(z)) returns
    nan (inf/inf) for large positive z. Accepts scalars or ndarrays.
    """
    return np.exp(np.minimum(z, 0)) / (1.0 + np.exp(-np.abs(z)))
def euclidean(z):
    """Return the squared Euclidean norm (sum of squared components) of z.

    Note: despite the name, this is ||z||^2 rather than ||z||; callers
    (e.g. the bounce operators) rely on the squared value.
    """
    return np.sum(np.square(z))
class BPS:
def __init__(self, X, Y, prior_var, ref, store_skip):
# X: input
# Y: output
# prior_var : variance of prior (normal distribution with mu = 0)
# ref : refresh rate (1/ref)
# store_skip : (thinned factor, not used)
# you may adjust the sample counts by ""sample_time"" in function "sampler"
self.X = X
self.Y = Y
self.all_samples = []
self.prior_var = prior_var
self.ref = ref
self.store_skip = store_skip
self.beta = np.random.normal(0,1,2)
self.all_samples.append(self.beta)
self.v = np.random.normal(0,1,2)
self.count = 0
self.pre_beta = self.beta
self.success = X[np.where(Y == 1)]
self.failure = X[np.where(Y == 0)]
# burnin time is computer time
# not BPS clock
self.burnin_time = 0
self.burnin_sample = 0
# for storage time checking
self.all_storage_time = 0
self.after_burnin_storage_time = 0
self.after_burnin_switch = 0
def prior_local_upper_bound(self, t):
# prior local upper bound calculation
temp = np.max([0,np.dot((self.beta + self.v * t), self.v)]) * self.prior_var**-2
return temp
def constant_int(self):
# the likelihood bound
term1 = np.abs(self.v[0]) * len(self.success) * (self.v[0] < 0)
term2 = np.max([0, np.abs(self.v[1]) * np.sum(self.success)])
term3 = self.v[0] * len(self.failure) * (self.v[0] > 0)
term4 = np.max([0, self.v[1] * np.sum(self.failure)])
return np.max([0, term1 + term2 + term3 + term4])
def R_prior(self):
# velocity bounce formula for prior
nominator = np.dot(self.beta* self.prior_var**-2, self.v)
denominator = euclidean(self.beta* self.prior_var**-2)
new_v = self.v - 2*(nominator/denominator) * self.beta * self.prior_var**-2
return new_v
def g_constant(self):
# exact likelihood intensity
term_1 = np.sum(expit(self.beta[0] + self.beta[1]*self.success)) - len(self.success)
term_2 = np.sum(self.success * (expit(self.beta[0] + self.beta[1]*self.success) -1))
term_3 = np.sum(expit(self.beta[0] + self.beta[1]*self.failure))
term_4 = np.sum(self.failure * (expit(self.beta[0] + self.beta[1]*self.failure)))
gU = np.array([term_1 + term_3, term_2 + term_4])
return np.dot(gU, self.v)
def R_constant(self):
# velocity bounce formula for likelihood
term_1 = np.sum(expit(self.beta[0] + self.beta[1]*self.success)) - len(self.success)
term_2 = np.sum(self.success * (expit(self.beta[0] + self.beta[1]*self.success) -1))
term_3 = np.sum(expit(self.beta[0] + self.beta[1]*self.failure))
term_4 = np.sum(self.failure * (expit(self.beta[0] + self.beta[1]*self.failure)))
gU = np.array([term_1 + term_3, term_2 + term_4])
nominator = | np.dot(gU, self.v) | numpy.dot |
'''
MixModel.py
Bayesian parametric mixture model with fixed, finite number of components K
Attributes
-------
K : integer number of components
alpha0 : scalar parameter of symmetric Dirichlet prior on mixture weights
'''
import numpy as np
from bnpy.allocmodel import AllocModel
from bnpy.suffstats import SuffStatBag
from bnpy.util import logsumexp, np2flatstr, flatstr2np
from bnpy.util import gammaln, digamma, EPS
class MixModel(AllocModel):
######################################################### Constructors
#########################################################
def __init__(self, inferType, priorDict=dict()):
self.inferType = inferType
self.set_prior(**priorDict)
self.K = 0
def set_prior(self, alpha0=1.0, **kwargs):
self.alpha0 = alpha0
if self.alpha0 < 1.0 and self.inferType == 'EM':
raise ValueError("Cannot perform MAP inference if param alpha0 < 1")
######################################################### Accessors
#########################################################
def get_keys_for_memoized_local_params(self):
''' Return list of string names of the LP fields
that this object needs to memoize across visits to a particular batch
'''
return list()
######################################################### Local Params
#########################################################
def calc_local_params(self, Data, LP, **kwargs):
''' Calculate local parameters for each data item and each component.
This is part of the E-step.
Args
-------
Data : bnpy data object with Data.nObs observations
LP : local param dict with fields
E_log_soft_ev : Data.nObs x K array
E_log_soft_ev[n,k] = log p(data obs n | comp k)
Returns
-------
LP : local param dict with fields
resp : Data.nObs x K array whose rows sum to one
resp[n,k] = posterior responsibility that comp. k has for data n
'''
lpr = LP['E_log_soft_ev']
if self.inferType.count('VB') > 0:
lpr += self.Elogw
# Calculate exp in numerically stable manner (first subtract the max)
# perform this in-place so no new allocations occur
lpr -= np.max(lpr, axis=1)[:,np.newaxis]
np.exp(lpr, out=lpr)
# Normalize, so rows sum to one
lpr /= lpr.sum(axis=1)[:,np.newaxis]
elif self.inferType == 'EM' > 0:
lpr += | np.log(self.w) | numpy.log |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Image PROcessing
"""
from tqdm import tqdm, trange
import os
import math
import numpy as np
from scipy.io import readsav
from astropy import wcs
from astropy.io import ascii
from astropy.table import Table
from reproject import reproject_interp
import subprocess as SP
## Local
from iolib import (fitsext, csvext, ascext, fclean,
read_fits, write_fits,
# read_csv, write_csv, read_ascii,
)
from arrlib import allist, closest
from mathlib import nanavg, bsplinterpol
from astrolib import fixwcs, get_pc
savext = '.sav'  # filename extension for IDL save files (cf. scipy.io.readsav)
##-----------------------------------------------
## <improve> based tools
##-----------------------------------------------
class improve:
    '''
    IMage PROcessing VEssel

    Base class wrapping a FITS image (2D) or spectral cube (3D): reads
    the data and WCS on construction, and provides noise injection,
    cube slicing and cropping used by the i* tool classes below.
    '''
    def __init__(self, filIN, wmod=0, verbose=False):
        '''
        ------ INPUT ------
        filIN               input FITS file
        wmod                wave mode forwarded to write_fits
        verbose             print basic file info if True

        self: filIN, wmod, hdr, w, cdelt, pc, cd, Ndim, Nx, Ny, Nw, im, wvl
        '''
        ## INPUTS
        self.filIN = filIN
        self.wmod = wmod
        self.verbose = verbose
        ## read image/cube
        ## self.hdr is a 2D (reduced) header
        ws = fixwcs(filIN)
        self.hdr = ws.header
        self.w = ws.wcs
        pcdelt = get_pc(wcs=ws.wcs)
        self.cdelt = pcdelt.cdelt
        self.pc = pcdelt.pc
        self.cd = pcdelt.cd
        self.Nx = self.hdr['NAXIS1']
        self.Ny = self.hdr['NAXIS2']
        self.Nw = None
        if verbose==True:
            print('<improve> file: ', filIN)
            print('Raw size (pix): {} * {}'.format(self.Nx, self.Ny))
        ## 3D cube slicing
        ds = read_fits(filIN)
        self.im = ds.data
        self.wvl = ds.wave
        self.Ndim = self.im.ndim
        if self.Ndim==3:
            if self.im.shape[0]==1:
                ## Degenerate single-plane cube is treated as a 2D image.
                self.Ndim = 2 # Nw=1 patch
            else:
                self.Nw = len(self.wvl)

    def rand_norm(self, file=None, unc=None, sigma=1., mu=0.):
        '''
        Add random N(0,1) noise, scaled by the uncertainty map.

        ------ INPUT ------
        file                FITS file holding the uncertainty map
        unc                 uncertainty ndarray (ignored if file given)
        sigma, mu           parameters of the normal draw
        ------ OUTPUT ------
        self.im             image with noise added in place
        '''
        if file is not None:
            unc = read_fits(file).data
        if unc is not None:
            ## unc should have the same dimension with im
            theta = np.random.normal(mu, sigma, self.im.shape)
            self.im += theta * unc
        return self.im

    def rand_splitnorm(self, file=None, unc=None, sigma=1., mu=0.):
        '''
        Add random SN(0,lam,lam*tau) split-normal noise.

        ------ INPUT ------
        file                2 FITS files for unc of left & right sides
        unc                 2 uncertainty ndarrays
        ------ OUTPUT ------
        self.im             image with noise added in place
        '''
        if file is not None:
            unc = []
            for f in file:
                unc.append(read_fits(f).data)
        if unc is not None:
            ## unc[i] should have the same dimension with self.im
            tau = unc[1]/unc[0]
            peak = 1/(1+tau)
            theta = np.random.normal(mu, sigma, self.im.shape) # ~N(0,1)
            flag = np.random.random(self.im.shape) # ~U(0,1)
            ## Draw from the negative (unc[0]) or positive (unc[1]) wing
            ## with probability peak / 1-peak, pixel by pixel.
            if self.Ndim==2:
                for x in range(self.Nx):
                    for y in range(self.Ny):
                        if flag[y,x]<peak[y,x]:
                            self.im[y,x] += -abs(theta[y,x]) * unc[0][y,x]
                        else:
                            self.im[y,x] += abs(theta[y,x]) * unc[1][y,x]
            elif self.Ndim==3:
                for x in range(self.Nx):
                    for y in range(self.Ny):
                        for k in range(self.Nw):
                            if flag[k,y,x]<peak[k,y,x]:
                                self.im[k,y,x] += -abs(
                                    theta[k,y,x]) * unc[0][k,y,x]
                            else:
                                self.im[k,y,x] += abs(
                                    theta[k,y,x]) * unc[1][k,y,x]
        return self.im

    def slice(self, filSL, postfix=''):
        '''
        Slice a 3D cube into per-wavelength 2D FITS files.

        ------ INPUT ------
        filSL               output path+basename prefix
        postfix             appended to each slice filename
        ------ OUTPUT ------
        slist               list of written slice filenames (no extension)
        '''
        ## 3D cube slicing
        slist = []
        if self.Ndim==3:
            hdr = self.hdr.copy()
            ## Strip the 3rd-axis keywords to get a valid 2D header.
            for kw in self.hdr.keys():
                if '3' in kw:
                    del hdr[kw]
            hdr['NAXIS'] = 2
            for k in range(self.Nw):
                ## output filename list
                f = filSL+'_'+'0'*(4-len(str(k)))+str(k)+postfix
                slist.append(f)
                write_fits(f, hdr, self.im[k,:,:]) # gauss_noise inclu
        else:
            print('Input file is a 2D image which cannot be sliced! ')
            f = filSL+'_0000'+postfix
            slist.append(f)
            write_fits(f, self.hdr, self.im) # gauss_noise inclu
            print('Rewritten with only random noise added (if provided).')
        return slist

    def slice_inv_sq(self, filSL, postfix=''):
        '''
        Slice the inverse-squared cube (1/im**2), e.g. for weight maps.

        ------ INPUT ------
        filSL               output path+basename prefix
        postfix             appended to each slice filename
        ------ OUTPUT ------
        slist               list of written slice filenames (no extension)
        '''
        ## Inversed square cube slicing
        inv_sq = 1./self.im**2
        slist = []
        if self.Ndim==3:
            hdr = self.hdr.copy()
            for kw in self.hdr.keys():
                if '3' in kw:
                    del hdr[kw]
            hdr['NAXIS'] = 2
            for k in range(self.Nw):
                ## output filename list
                f = filSL+'_'+'0'*(4-len(str(k)))+str(k)+postfix
                slist.append(f)
                write_fits(f, hdr, inv_sq[k,:,:]) # gauss_noise inclu
        else:
            f = filSL+'_0000'+postfix
            slist.append(f)
            write_fits(f, self.hdr, inv_sq) # gauss_noise inclu
        return slist

    def crop(self, filOUT=None, \
             sizpix=None, cenpix=None, sizval=None, cenval=None):
        '''
        Crop the image/cube around a centre, in place.

        If pix and val co-exist, pix will be taken.
        ------ INPUT ------
        filOUT              output file
        sizpix              crop size in pix (dx, dy)
        cenpix              crop center in pix (x, y)
        sizval              crop size in deg (dRA, dDEC) -> (dx, dy)
        cenval              crop center in deg (RA, DEC) -> (x, y)
        ------ OUTPUT ------
        self.im             cropped image array
        '''
        ## Crop center
        ##-------------
        if cenpix is None:
            if cenval is None:
                raise ValueError('Crop center unavailable! ')
            else:
                ## Convert coord
                try:
                    cenpix = np.array(self.w.all_world2pix(cenval[0], cenval[1], 1))
                except wcs.wcs.NoConvergence as e:
                    ## Fall back on the best (non-converged) WCS solution.
                    cenpix = e.best_solution
                    print("Best solution:\n{0}".format(e.best_solution))
                    print("Achieved accuracy:\n{0}".format(e.accuracy))
                    print("Number of iterations:\n{0}".format(e.niter))
        else:
            cenval = self.w.all_pix2world(np.array([cenpix]), 1)[0]
        if not (0<cenpix[0]<self.Nx and 0<cenpix[1]<self.Ny):
            raise ValueError('Crop centre overpassed image border! ')
        ## Crop size
        ##-----------
        if sizpix is None:
            if sizval is None:
                raise ValueError('Crop size unavailable! ')
            else:
                ## CDELTn needed (Physical increment at the reference pixel)
                sizpix = np.array(sizval) / abs(self.cdelt)
                sizpix = np.array([math.floor(n) for n in sizpix])
        else:
            sizval = np.array(sizpix) * abs(self.cdelt)
        if self.verbose==True:
            print('----------')
            print("Crop centre (RA, DEC): [{:.8}, {:.8}]".format(*cenval))
            print("Crop size (dRA, dDEC): [{}, {}]\n".format(*sizval))
            print("Crop centre (x, y): [{}, {}]".format(*cenpix))
            print("Crop size (dx, dy): [{}, {}]".format(*sizpix))
            print('----------')
        ## Lowerleft origin
        ##------------------
        xmin = math.floor(cenpix[0] - sizpix[0]/2.)
        ymin = math.floor(cenpix[1] - sizpix[1]/2.)
        xmax = xmin + sizpix[0]
        ymax = ymin + sizpix[1]
        if not (xmin>=0 and xmax<=self.Nx and ymin>=0 and ymax<=self.Ny):
            raise ValueError('Crop region overpassed image border! ')
        ## OUTPUTS
        ##---------
        ## New image
        if self.Ndim==3:
            self.im = self.im[:, ymin:ymax, xmin:xmax] # gauss_noise inclu
            ## recover 3D non-reduced header
            self.hdr = read_fits(self.filIN).header
        elif self.Ndim==2:
            self.im = self.im[ymin:ymax, xmin:xmax] # gauss_noise inclu
        ## Modify header
        ## Suppose no non-linear distortion
        self.hdr['CRPIX1'] = sizpix[0] / 2.
        self.hdr['CRPIX2'] = sizpix[1] / 2.
        self.hdr['CRVAL1'] = cenval[0]
        self.hdr['CRVAL2'] = cenval[1]
        ## Write cropped image/cube
        if filOUT is not None:
            # comment = "[ICROP]ped at centre: [{:.8}, {:.8}]. ".format(*cenval)
            # comment = "with size [{}, {}] (pix).".format(*sizpix)
            write_fits(filOUT, self.hdr, self.im, self.wvl, self.wmod)
        return self.im
class islice(improve):
    '''
    Slice a cube

    Reads a FITS cube, optionally injects noise from an uncertainty
    map, then writes one 2D FITS file per wavelength plane.

    ------ INPUT ------
    filIN               input FITS
    filSL               ouput path+basename
    filUNC              input uncertainty FITS
    dist                unc pdf ('norm' or 'splitnorm')
    slicetype           Default: None
                          None - normal slices
                          'inv_sq' - inversed square slices
    postfix             postfix of output slice names
    ------ OUTPUT ------
    self: slist, path_tmp,
          (filIN, wmod, hdr, w, cdelt, pc, cd, Ndim, Nx, Ny, Nw, im, wvl)
    '''
    def __init__(self, filIN, filSL=None, filUNC=None, dist='norm', \
                 slicetype=None, postfix=''):
        super().__init__(filIN)

        ## Default slice destination: ./tmp_proc/slice*
        if filSL is None:
            path_tmp = os.getcwd()+'/tmp_proc/'
            if not os.path.exists(path_tmp):
                os.makedirs(path_tmp)
            filSL = path_tmp+'slice'
        self.filSL = filSL

        ## Optional noise injection before slicing.
        if dist=='norm':
            self.rand_norm(filUNC)
        elif dist=='splitnorm':
            self.rand_splitnorm(filUNC)

        if slicetype is None:
            self.slist = self.slice(filSL, postfix) # gauss_noise inclu
        elif slicetype=='inv_sq':
            self.slist = self.slice_inv_sq(filSL, postfix)

    def image(self):
        ## Accessor: current (possibly noise-added) image/cube array.
        return self.im

    def wave(self):
        ## Accessor: wavelength grid (None for 2D input).
        return self.wvl

    def filenames(self):
        ## Accessor: list of written slice filenames.
        return self.slist

    def clean(self, file=None):
        ## Remove a given file, or all written slices by default.
        if file is not None:
            fclean(file)
        else:
            fclean(self.filSL+'*')
class icrop(improve):
    '''
    CROP 2D image or 3D cube

    Convenience wrapper: read, optionally add noise from an
    uncertainty map, then crop (and optionally write) in one call.
    See improve.crop for the meaning of the crop parameters.
    '''
    def __init__(self, filIN, filOUT=None, \
                 sizpix=None, cenpix=None, sizval=None, cenval=None, \
                 filUNC=None, dist='norm', wmod=0, verbose=False):
        ## slicrop: slice
        super().__init__(filIN, wmod, verbose)

        ## Optional noise injection before cropping.
        if dist=='norm':
            self.rand_norm(filUNC)
        elif dist=='splitnorm':
            self.rand_splitnorm(filUNC)

        im_crop = self.crop(filOUT=filOUT, sizpix=sizpix, cenpix=cenpix, \
                            sizval=sizval, cenval=cenval) # gauss_noise inclu

    def image(self):
        ## Accessor: cropped image/cube array.
        return self.im

    def wave(self):
        ## Accessor: wavelength grid (None for 2D input).
        return self.wvl
class imontage(improve):
    '''
    2D image or 3D cube montage toolkit
    i means <improve>-based or initialize

    ------ INPUT ------
    file                FITS file (list, cf improve.filIN)
    filREF              ref file (priority if co-exist with input header)
    hdREF               ref header
    fmod                output image frame mode
                          'ref' - same as ref frame (Default)
                          'rec' - recenter back to input frame
                          'ext' - cover both input and ref frame
    ext_pix             number of pixels to extend to save edge
    tmpdir              tmp file path
    ------ OUTPUT ------
    '''
    def __init__(self, file, filREF=None, hdREF=None, \
                 fmod='ref', ext_pix=0, tmpdir=None):
        '''
        self: hdr_ref, path_tmp,
              (filIN, wmod, hdr, w, Ndim, Nx, Ny, Nw, im, wvl)
        '''
        ## Set path of tmp files
        if tmpdir is None:
            path_tmp = os.getcwd()+'/tmp_proc/'
        else:
            path_tmp = tmpdir
        if not os.path.exists(path_tmp):
            os.makedirs(path_tmp)
        self.path_tmp = path_tmp
        ## Inputs
        self.file = file
        self.filREF = filREF
        self.hdREF = hdREF
        self.fmod = fmod
        self.ext_pix = ext_pix
        ## Init ref header (built lazily by make()).
        self.hdr_ref = None

    def make_header(self, filIN, filREF=None, hdREF=None, fmod='ref', ext_pix=0):
        '''
        Make header tool

        Builds/updates self.hdr_ref: the target WCS header that the
        input frame will be reprojected onto, possibly recentered
        ('rec') or extended ('ext') to cover the input footprint.

        ------ INPUT ------
        filIN               single FITS file
        '''
        super().__init__(filIN)
        ## Prepare reprojection header
        if filREF is not None:
            hdREF = read_fits(filREF).header
            # hdREF['EQUINOX'] = 2000.0
        if hdREF is not None:
            ## Frame mode (fmod) options
            ##---------------------------
            if fmod=='ref':
                pass
            else:
                ## Project the input frame's corner pixels into the ref WCS
                ## to find how far the input footprint extends.
                ## Input WCS (old)
                pix_old = [[0, 0]]
                pix_old.append([0, self.Ny])
                pix_old.append([self.Nx, 0])
                pix_old.append([self.Nx, self.Ny])
                world_arr = self.w.all_pix2world(np.array(pix_old), 1)
                ## Ref WCS (new)
                w = fixwcs(header=hdREF).wcs
                try:
                    pix_new = w.all_world2pix(world_arr, 1)
                except wcs.wcs.NoConvergence as e:
                    pix_new = e.best_solution
                    print("Best solution:\n{0}".format(e.best_solution))
                    print("Achieved accuracy:\n{0}".format(e.accuracy))
                    print("Number of iterations:\n{0}".format(e.niter))
                xmin = min(pix_new[:,0])
                xmax = max(pix_new[:,0])
                ymin = min(pix_new[:,1])
                ymax = max(pix_new[:,1])
                ## Modify ref header
                if fmod=='rec':
                    hdREF['CRPIX1'] += -xmin
                    hdREF['CRPIX2'] += -ymin
                    hdREF['NAXIS1'] = math.ceil(xmax - xmin)
                    hdREF['NAXIS2'] = math.ceil(ymax - ymin)
                elif fmod=='ext':
                    if xmin<0:
                        hdREF['CRPIX1'] += -xmin
                    if ymin<0:
                        hdREF['CRPIX2'] += -ymin
                    hdREF['NAXIS1'] = math.ceil(max(xmax, hdREF['NAXIS1']-xmin, \
                        xmax-xmin, hdREF['NAXIS1'])) + ext_pix # save edges
                    hdREF['NAXIS2'] = math.ceil(max(ymax, hdREF['NAXIS2']-ymin, \
                        ymax-ymin, hdREF['NAXIS2'])) + ext_pix
            ## Save hdREF
            self.hdr_ref = hdREF

            ## Test hdREF (Quick check: old=new or old<new)
            # w_new = fixwcs(header=hdREF).wcs
            # print('old: ', w.all_world2pix(
            #     self.hdr['CRVAL1'], self.hdr['CRVAL2'], 1))
            # print('new: ', w_new.all_world2pix(
            #     self.hdr['CRVAL1'], self.hdr['CRVAL2'], 1))
            # exit()
        else:
            raise ValueError('Cannot find projection reference! ')

    def make(self):
        '''
        Preparation (make header)

        Builds self.hdr_ref from self.file; with fmod='ext' the header
        is grown iteratively so it covers every input frame.
        '''
        file = self.file
        filREF = self.filREF
        hdREF = self.hdREF
        fmod = self.fmod
        ext_pix = self.ext_pix
        if isinstance(file, str):
            self.make_header(file, filREF, hdREF, fmod, ext_pix)
        elif isinstance(file, list):
            self.make_header(file[0], filREF, hdREF, fmod, ext_pix)
            if fmod=='ext':
                ## Refresh self.hdr_ref in every circle
                for f in file:
                    self.make_header(filIN=f, filREF=None, \
                        hdREF=self.hdr_ref, fmod='ext', ext_pix=ext_pix)
        tqdm.write('<imontage> Making ref header...[done]')
        return self.hdr_ref

    def footprint(self, filOUT=None):
        '''
        Save reprojection footprint

        Writes (and returns) an all-ones image with the ref header,
        marking the pixels covered by the output frame.
        '''
        if filOUT is None:
            filOUT = self.path_tmp+'footprint'
        Nx = self.hdr_ref['NAXIS1']
        Ny = self.hdr_ref['NAXIS2']
        im_fp = np.ones((Ny, Nx))
        comment = "<imontage> footprint"
        write_fits(filOUT, self.hdr_ref, im_fp, COMMENT=comment)
        return im_fp

    def reproject(self, filIN, filOUT=None, \
                  filUNC=None, dist='norm', postfix=''):
        '''
        Reproject 2D image or 3D cube

        ------ INPUT ------
        filIN               single FITS file to reproject
        filOUT              output FITS file
        filUNC              unc files
        dist                uncertainty distribution
                              'norm' - N(0,1)
                              'splitnorm' - SN(0,lam,lam*tau)
        postfix
        ------ OUTPUT ------
        self.im             reprojected image/cube
        '''
        super().__init__(filIN)

        ## Optional noise injection before reprojection (for MC runs).
        if dist=='norm':
            self.rand_norm(filUNC)
        elif dist=='splitnorm':
            self.rand_splitnorm(filUNC)

        ## Set reprojection tmp path
        ##---------------------------
        filename = os.path.basename(filIN)
        rep_tmp = self.path_tmp+filename+postfix+'/'
        if not os.path.exists(rep_tmp):
            os.makedirs(rep_tmp)

        self.slist = self.slice(rep_tmp+'slice', '_') # gauss_noise inclu
        ## Do reprojection
        ##-----------------
        cube_rep = []
        # for k in range(self.Nw):
        #     hdr = self.hdr.copy()
        #     for kw in self.hdr.keys():
        #         if '3' in kw:
        #             del hdr[kw]
        #     hdr['NAXIS'] = 2
        #     phdu = fits.PrimaryHDU(header=hdr, data=self.im[k,:,:])
        #     im_rep = reproject_interp(phdu, self.hdr_ref)[0]
        for s in self.slist:
            ## Reproject each 2D slice onto the ref WCS, then delete it.
            im_rep = reproject_interp(s+fitsext, self.hdr_ref)[0]
            cube_rep.append(im_rep)
            write_fits(s+'rep_', self.hdr_ref, im_rep)
            fclean(s+fitsext)

        self.im = np.array(cube_rep)

        comment = "Reprojected by <imontage>. "

        if filOUT is None:
            filOUT = self.path_tmp+filename+postfix+'_rep'
        self.file_rep = filOUT

        write_fits(filOUT, self.hdr_ref, self.im, self.wvl, wmod=0, \
                   COMMENT=comment)

        return self.im

    def reproject_mc(self, filIN, filUNC, Nmc=0, dist='norm'):
        '''
        Generate Monte-Carlo uncertainties for reprojected input file

        Run j=0 without noise (reference image), then Nmc noisy
        realizations; the per-pixel std over realizations is the
        propagated uncertainty.
        '''
        dataset = type('', (), {})()

        hyperim = [] # [j,(w,)y,x]
        for j in trange(Nmc+1, leave=False, \
                        desc='<imontage> Reprojection (MC level)'):
            if j==0:
                im0 = self.reproject(filIN, \
                    filUNC=None, dist=dist)
                file_rep = self.file_rep
            else:
                hyperim.append(self.reproject(filIN, \
                    filUNC=filUNC, dist=dist, postfix='_'+str(j)))
        im0 = np.array(im0)
        hyperim = np.array(hyperim)
        unc = np.nanstd(hyperim, axis=0)
        comment = "An <imontage> production"

        write_fits(file_rep+'_unc', self.hdr_ref, unc, self.wvl, \
                   COMMENT=comment)

        dataset.im0 = im0
        dataset.unc = unc
        dataset.hyperim = hyperim

        ## NOTE(review): returns a namespace object, but combine() unpacks
        ## it as "im0, unc, hyperim = self.reproject_mc(...)" — confirm
        ## intended return convention.
        return dataset

    def combine(self, file, filOUT=None, method='avg', \
                filUNC=None, do_rep=True, Nmc=0, dist='norm'):
        '''
        Stitching input files (with the same wavelengths) to the ref WCS

        If filUNC is None, no MC
        If Nmc==0, no MC
        '''
        dataset = type('', (), {})()

        wvl = read_fits(file[0]).wave
        dataset.wvl = wvl

        superim0 = [] # [i,(w,)y,x]
        superunc = [] # [i,(w,)y,x]
        superim = [] # [i,j,(w,)y,x]
        Nf = np.size(file)
        for i in trange(Nf, leave=False, \
                        desc='<imontage> Reprojection (file level)'):
            ## (Re)do reprojection
            ##---------------------
            if do_rep==True:
                ## With MC
                if filUNC is not None:
                    im0, unc, hyperim = self.reproject_mc(file[i], filUNC[i], \
                        Nmc=Nmc, dist=dist)
                    superunc.append(unc)
                    superim.append(hyperim)
                ## Without MC
                else:
                    im0 = self.reproject(file[i])
                superim0.append(im0)
            ## Read archives
            ##---------------
            else:
                filename = os.path.basename(file[i])
                file_rep = self.path_tmp+filename+'_rep'
                if filUNC is not None:
                    hyperim = [] # [j,(w,)y,x]
                    for j in range(Nmc+1):
                        if j==0:
                            superunc.append(read_fits(file_rep+'_unc').data)
                        else:
                            file_rep = self.path_tmp+filename+'_'+str(j)+'_rep'
                            hyperim.append(read_fits(file_rep).data)
                    hyperim = np.array(hyperim)
                    superim.append(hyperim)
                superim0.append(read_fits(file_rep).data)
        superim0 = np.array(superim0)
        superunc = np.array(superunc)
        superim = np.array(superim)

        ## Combine images
        ##----------------
        ## Think about using 'try - except'
        if filUNC is not None:
            inv_var = 1./superunc**2
            hyperim_comb = []
            for j in trange(Nmc+1, leave=False, \
                            desc='<imontage> Stitching'):
                if j==0:
                    if method=='avg':
                        im0_comb = nanavg(superim0, axis=0)
                    elif method=='wgt_avg':
                        im0_comb = nanavg(superim0, axis=0, weights=inv_var)
                else:
                    if method=='avg':
                        hyperim_comb.append(nanavg(superim[:,j-1], axis=0))
                    elif method=='wgt_avg':
                        hyperim_comb.append(
                            nanavg(superim[:,j-1], axis=0, weights=inv_var))
            hyperim_comb = np.array(hyperim_comb)
            unc_comb = np.nanstd(hyperim_comb)
        else:
            ## If no unc, inverse variance weighted mean not available
            im0_comb = nanavg(superim0, axis=0)
            ## NOTE(review): unc_comb/hyperim_comb are undefined on this
            ## path but referenced below — NameError when filUNC is None;
            ## confirm and guard.

        if filOUT is not None:
            comment = "An <imontage> production"
            write_fits(filOUT, self.hdr_ref, im0_comb, wvl, \
                       COMMENT=comment)
            ## NOTE(review): the '_unc' file is written with im0_comb,
            ## not unc_comb — looks like a copy-paste slip; confirm.
            write_fits(filOUT+'_unc', self.hdr_ref, im0_comb, wvl, \
                       COMMENT=comment)

        dataset.im0_comb = im0_comb
        dataset.unc_comb = unc_comb
        dataset.hyperim_comb = hyperim_comb
        dataset.superim0 = superim0
        dataset.superunc = superunc
        dataset.superim = superim

        tqdm.write('<imontage> Combining images...[done]')

        return dataset

    def clean(self, file=None):
        ## Remove a given file, or the whole tmp directory by default.
        if file is not None:
            fclean(file)
        else:
            fclean(self.path_tmp)
class iswarp(improve):
'''
SWarp drop-in image montage toolkit
i means <improve>-based
In competetion with its fully Python-based twin <imontage>
------ INPUT ------
filIN ref FITS files used to make header (footprint)
center center of output image frame
None - contains all input fields
str('hh:mm:ss,dd:mm:ss') - manual input RA,DEC
pixscale pixel scale (arcsec)
None - median of pixscale at center input frames
float() - in arcseconds
verbose default: True
tmpdir tmp file path
------ OUTPUT ------
coadd.fits
By default, SWarp reprojects all input to a WCS with diag CD matrix.
"To implement the unusual output features required,
one must write a coadd.head ASCII file that contains
a custom anisotropic scaling matrix. "
'''
def __init__(self, filIN=None, refheader=None,
center=None, pixscale=None,
verbose=False, tmpdir=None):
'''
self: path_tmp, verbose
(filIN, wmod, hdr, w, Ndim, Nx, Ny, Nw, im, wvl)
'''
if verbose==False:
devnull = open(os.devnull, 'w')
else:
devnull = None
self.verbose = verbose
self.devnull = devnull
## Set path of tmp files
if tmpdir is None:
path_tmp = os.getcwd()+'/tmp_swp/'
else:
path_tmp = tmpdir
if not os.path.exists(path_tmp):
os.makedirs(path_tmp)
self.path_tmp = path_tmp
fclean(path_tmp+'coadd*') # remove previous coadd.fits/.head
if filIN is None:
if refheader is None:
raise ValueError('No input!')
else:
self.refheader = refheader
else:
## Input files in list format
filIN = allist(filIN)
## Images
image_files = ' '
for i in range(len(filIN)):
image = read_fits(filIN[i]).data
hdr = fixwcs(filIN[i]).header
file_ref = filIN[i]
if image.ndim==3:
## Extract 1st frame of the cube
file_ref = path_tmp+os.path.basename(filIN[i])+'_ref'
write_fits(file_ref, hdr, image[0])
image_files += file_ref+fitsext+' ' # input str for SWarp
## Create config file
SP.call('swarp -d > swarp.cfg', \
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
## Config param list
swarp_opt = ' -c swarp.cfg -SUBTRACT_BACK N -IMAGEOUT_NAME coadd.ref.fits '
if center is not None:
swarp_opt += ' -CENTER_TYPE MANUAL -CENTER '+center
if pixscale is not None:
swarp_opt += ' -PIXELSCALE_TYPE MANUAL -PIXEL_SCALE '+str(pixscale)
if verbose=='quiet':
swarp_opt += ' -VERBOSE_TYPE QUIET '
## Run SWarp
SP.call('swarp '+swarp_opt+image_files, \
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
## Save ref header
if refheader is None:
self.refheader = read_fits(path_tmp+'coadd.ref').header
else:
self.refheader = refheader
# fclean(path_tmp+'*_ref.fits')
def footprint(self, filOUT=None):
'''
Save reprojection footprint
'''
if filOUT is None:
filOUT = self.path_tmp+'footprint'
Nx = self.refheader['NAXIS1']
Ny = self.refheader['NAXIS2']
im_fp = np.ones((Ny, Nx))
comment = "<iswarp> footprint"
write_fits(filOUT, self.refheader, im_fp, COMMENT=comment)
return im_fp
def combine(self, file, combtype='med', \
keepedge=True, uncpdf=None, filOUT=None, tmpdir=None):
'''
Combine
------ INPUT ------
file input FITS files should have the same wvl
combtype combine type
med - median
avg - average
wgt_avg - inverse variance weighted average
keepedge default: False
uncpdf add uncertainties (filename+'_unc.fits' needed)
filOUT output FITS file
------ OUTPUT ------
coadd.head key for SWarp (inherit self.refheader)
'''
ds = type('', (), {})()
verbose = self.verbose
devnull = self.devnull
path_tmp = self.path_tmp
if tmpdir is None:
path_comb = path_tmp+'comb/'
else:
path_comb = tmpdir
if not os.path.exists(path_comb):
os.makedirs(path_comb)
## Input files in list format
file = allist(file)
## Header
##--------
with open(path_tmp+'coadd.head', 'w') as f:
f.write(str(self.refheader))
## Images and weights
##--------------------
Nf = len(file)
imshape = read_fits(file[0]).data.shape
if len(imshape)==3:
Nw = imshape[0]
wvl = read_fits(file[0]).wave
else:
Nw = 1
wvl = None
## Build imlist & wgtlist (size=Nf)
imlist = []
wgtlist = []
for i in range(Nf):
filename = os.path.basename(file[i])
## Set slice file
file_slice = path_comb+filename
## Slice
super().__init__(file[i])
if uncpdf=='norm':
self.rand_norm(file[i]+'_unc')
elif uncpdf=='splitnorm':
self.rand_splitnorm([file[i]+'_unc_N', file[i]+'_unc_P'])
imlist.append(self.slice(file_slice, ''))
if combtype=='wgt_avg':
super().__init__(file[i]+'_unc')
wgtlist.append(self.slice_inv_sq(file_slice, '.weight'))
## Build image_files & weight_files (size=Nw)
image_files = [' ']*Nw
weight_files = [' ']*Nw
## Let's SWarp
##-------------
hyperimage = []
for k in trange(Nw, leave=False,
desc='<iswarp> Combining (by wvl)'):
for i in range(Nf):
image_files[k] += imlist[i][k]+fitsext+' '
if combtype=='wgt_avg':
weight_files[k] += wgtlist[i][k]+fitsext+' '
## Create config file
SP.call('swarp -d > swarp.cfg', \
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
## Config param list
swarp_opt = ' -c swarp.cfg -SUBTRACT_BACK N '
if combtype=='med':
pass
elif combtype=='avg':
swarp_opt += ' -COMBINE_TYPE AVERAGE '
elif combtype=='wgt_avg':
swarp_opt += ' -COMBINE_TYPE WEIGHTED '
swarp_opt += ' -WEIGHT_TYPE MAP_WEIGHT '
swarp_opt += ' -WEIGHT_SUFFIX .weight.fits '
# swarp_opt += ' -WEIGHT_IMAGE '+weight_files[k] # not worked
if verbose=='quiet':
swarp_opt += ' -VERBOSE_TYPE QUIET '
## Run SWarp
SP.call('swarp '+swarp_opt+' -RESAMPLING_TYPE LANCZOS3 '+image_files[k], \
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
coadd = read_fits(path_tmp+'coadd')
newimage = coadd.data
newheader = coadd.header
## Add back in the edges because LANCZOS3 kills the edges
## Do it in steps of less and less precision
if keepedge==True:
oldweight = read_fits(path_tmp+'coadd.weight').data
if np.sum(oldweight==0)!=0:
SP.call('swarp '+swarp_opt+' -RESAMPLING_TYPE LANCZOS2 '+image_files[k],
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
edgeimage = read_fits(path_tmp+'coadd').data
newweight = read_fits(path_tmp+'coadd.weight').data
edgeidx = np.ma.array(oldweight,
mask=np.logical_and(oldweight==0, newweight!=0)).mask
if edgeidx.any():
newimage[edgeidx] = edgeimage[edgeidx]
oldweight = read_fits(path_tmp+'coadd.weight').data
if np.sum(oldweight==0)!=0:
SP.call('swarp '+swarp_opt+' -RESAMPLING_TYPE BILINEAR '+image_files[k],
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
edgeimage = read_fits(path_tmp+'coadd').data
newweight = read_fits(path_tmp+'coadd.weight').data
edgeidx = np.ma.array(oldweight,
mask=np.logical_and(oldweight==0, newweight!=0)).mask
if edgeidx.any():
newimage[edgeidx] = edgeimage[edgeidx]
oldweight = read_fits(path_tmp+'coadd.weight').data
if np.sum(oldweight==0)!=0:
SP.call('swarp '+swarp_opt+' -RESAMPLING_TYPE NEAREST '+image_files[k],
shell=True, cwd=path_tmp, stdout=devnull, stderr=SP.STDOUT)
edgeimage = read_fits(path_tmp+'coadd').data
newweight = read_fits(path_tmp+'coadd.weight').data
edgeidx = np.ma.array(oldweight,
mask=np.logical_and(oldweight==0, newweight!=0)).mask
if edgeidx.any():
newimage[edgeidx] = edgeimage[edgeidx]
## Astrometric flux-rescaling based on the local ratio of pixel scale
## Complementary for lack of FITS kw 'FLXSCALE'
## Because SWarp is conserving surface brightness/pixel
oldcdelt = get_pc(wcs=fixwcs(file[i]).wcs).cdelt
newcdelt = get_pc(wcs=fixwcs(path_tmp+'coadd').wcs).cdelt
old_pixel_fov = abs(oldcdelt[0]*oldcdelt[1])
new_pixel_fov = abs(newcdelt[0]*newcdelt[1])
newimage = newimage * old_pixel_fov/new_pixel_fov
ma_zero = np.ma.array(newimage, mask=(newimage==0)).mask
newimage[ma_zero] = np.nan
# write_fits(path_comb+'coadd_'+str(k), newheader, newimage)
# tqdm.write(str(old_pixel_fov))
# tqdm.write(str(new_pixel_fov))
# tqdm.write(str(abs(newheader['CD1_1']*newheader['CD2_2'])))
hyperimage.append(newimage)
hyperimage = np.array(hyperimage)
if filOUT is not None:
write_fits(filOUT, newheader, hyperimage, wvl)
ds.header = newheader
ds.image = hyperimage
ds.wvl = wvl
return ds
def clean(self, file=None):
    '''
    Remove intermediate products.

    ------ INPUT ------
    file        a specific file to delete (Default: None -> whole tmp dir)
    '''
    target = self.path_tmp if file is None else file
    fclean(target)
class iconvolve(improve):
    '''
    Convolve 2D image or 3D cube with given kernels

    i means <improve>-based or IDL-based

    ------ INPUT ------
    filIN       input FITS file
    kfile       convolution kernel(s) (tuple or list)
    klist       CSV file storing kernel names
    filUNC      unc file (add gaussian noise)
    dist        noise distribution: 'norm' or 'splitnorm' (Default: 'norm')
    psf         PSF sigma list used to select kernels (arcsec)
    convdir     do_conv path (Default: None -> filIN path)
    filOUT      output file
    ------ OUTPUT ------
    '''
    def __init__(self, filIN, kfile, klist,
                 filUNC=None, dist='norm', psf=None, convdir=None, filOUT=None):
        ## INPUTS
        super().__init__(filIN)
        ## Optionally perturb the data with noise drawn from the unc file
        if dist=='norm':
            self.rand_norm(filUNC)
        elif dist=='splitnorm':
            self.rand_splitnorm(filUNC)

        ## Input kernel file in list format
        self.kfile = allist(kfile)
        ## doc (csv) file of kernel list
        self.klist = klist
        self.path_conv = convdir
        self.filOUT = filOUT

        ## INIT
        if psf is None:
            self.psf = [2.,2.5,3.,3.5,4.,4.5,5.,5.5,6.]
        else:
            self.psf = psf
        ## sigma_lam stays None for 2D input; set by spitzer_irs() for cubes
        self.sigma_lam = None

    def spitzer_irs(self):
        '''
        Spitzer/IRS PSF profile

        Sets self.sigma_lam: the geometric-mean Gaussian sigma (arcsec) of
        the parallel/perpendicular PSF interpolated at each wavelength of
        self.wvl.

        [ref]
        Pereira-Santaella et al. "Local Luminous Infrared Galaxies. I.
        Spatially Resolved Observations with the Spitzer Infrared
        Spectrograph." The Astrophysical Journal Supplement Series 188,
        no. 2 (June 1, 2010): 447. doi:10.1088/0067-0049/188/2/447.
        '''
        sim_par_wave = [0, 13.25, 40.]
        sim_par_fwhm = [2.8, 3.26, 10.1]
        sim_per_wave = [0, 15.5, 40.]
        sim_per_fwhm = [3.8, 3.8, 10.1]

        ## fwhm (arcsec)
        fwhm_par = np.interp(self.wvl, sim_par_wave, sim_par_fwhm)
        fwhm_per = np.interp(self.wvl, sim_per_wave, sim_per_fwhm)
        ## sigma (arcsec): sigma = FWHM / (2*sqrt(2*ln2))
        sigma_par = fwhm_par / (2. * np.sqrt(2.*np.log(2.)))
        sigma_per = fwhm_per / (2. * np.sqrt(2.*np.log(2.)))
        self.sigma_lam = np.sqrt(sigma_par * sigma_per)

    def choker(self, file):
        '''
        CHOose KERnel(s)

        Pair each input image with a kernel and write the pairing to the
        CSV file self.klist (read later by the IDL convolution routine).
        '''
        image = []
        kernel = []
        for i, filim in enumerate(file):
            ## check PSF profile (or is not a cube)
            if self.sigma_lam is not None:
                ## one kernel per wavelength slice, nearest in sigma
                image.append(filim)
                ind = closest(self.psf, self.sigma_lam[i])
                kernel.append(self.kfile[ind])
            else:
                ## single 2D image: use the first kernel
                image.append(file[0])
                kernel.append(self.kfile[0])

        ## write csv file
        dataset = Table([image, kernel], names=['Images', 'Kernels'])
        ascii.write(dataset, self.klist+csvext, format='csv')

    def do_conv(self, idldir, verbose=False):
        '''
        Run the IDL convolution (conv.pro) on the (sliced) input.

        ------ INPUT ------
        idldir      path of IDL routines
        verbose     if False, silence the IDL subprocess output
        ------ OUTPUT ------
        '''
        ## BUGFIX: the devnull handle was previously opened and never closed
        devnull = open(os.devnull, 'w') if not verbose else None
        try:
            filename = os.path.basename(self.filIN)

            if self.Ndim==3:
                if self.path_conv is not None:
                    f2conv = self.slice(self.path_conv+filename) # gauss_noise inclu
                else:
                    f2conv = self.slice(self.filIN) # gauss_noise inclu

                self.spitzer_irs()
            else:
                f2conv = [self.filIN]

            self.choker(f2conv)

            SP.call('idl conv.pro',
                    shell=True, cwd=idldir, stdout=devnull, stderr=SP.STDOUT)
        finally:
            if devnull is not None:
                devnull.close()

        ## OUTPUTS
        ##---------
        if self.Ndim==3:
            im = []
            self.slist = []
            for f in f2conv:
                im.append(read_fits(f+'_conv').data)
                self.slist.append(f+'_conv')

            self.convim = np.array(im)
            ## recover 3D header cause the lost of WCS due to PS3_0='WCS-TAB'
            # self.hdr = read_fits(self.filIN).header
        else:
            self.convim = read_fits(self.filIN+'_conv').data

        if self.filOUT is not None:
            comment = "Convolved by G. Aniano's IDL routine."
            write_fits(self.filOUT, self.hdr, self.convim, self.wvl,
                COMMENT=comment)

    def image(self):
        ## Convolved data (2D image or 3D cube)
        return self.convim

    def wave(self):
        ## Wavelength grid of the input
        return self.wvl

    def filenames(self):
        ## Per-slice '_conv' file names (only set for 3D input)
        return self.slist

    def clean(self, file=None):
        ## Remove a given file, or the whole convolution directory
        if file is not None:
            fclean(file)
        else:
            if self.path_conv is not None:
                fclean(self.path_conv)
class sextract(improve):
'''
AKARI/IRC spectroscopy slit coord extraction
s means slit, spectral cube or SAV file
------ INPUT ------
filOUT output FITS file
pathobs path of IRC dataset
parobs[0] observation id
parobs[1] slit name
parobs[2] IRC N3 (long exp) frame (2MASS corrected; 90 deg rot)
Nw num of wave
Ny slit length
Nx slit width
------ OUTPUT ------
'''
def __init__(self, pathobs=None, parobs=None, verbose=False):
    '''
    Locate the IRC spectral-reduction products and record slit geometry.

    ------ INPUT ------
    pathobs     path of IRC dataset
    parobs      (obs_id, slit_name, N3_frame) -- see class docstring
    verbose     print a summary of the extracted slit
    '''
    self.path = pathobs + parobs[0] + '/irc_specred_out_' + parobs[1]+'/'
    filIN = self.path + parobs[2]
    super().__init__(filIN)

    self.filSAV = self.path + parobs[0] + '.N3_NG.IRC_SPECRED_OUT'
    self.table = readsav(self.filSAV+savext, python_dict=True)['source_table']

    ## Slit width will be corrected during reprojection
    if parobs[1]=='Ns':
        self.slit_width = 3 # 5"/1.446" = 3.458 pix (Ns)
    elif parobs[1]=='Nh':
        self.slit_width = 2 # 3"/1.446" = 2.075 pix (Nh)
    else:
        ## BUGFIX: slit_width was previously left unset for an unknown slit
        ## name, deferring the failure to a later AttributeError.
        raise ValueError('Unknown slit name: {}'.format(parobs[1]))

    if verbose==True:
        print('\n----------')
        print('Slit extracted from ')
        print('obs_id: {} \nslit: {}'.format(parobs[0], parobs[1]))
        print('----------\n')
def rand_pointing(self, sigma=0.):
    '''
    Add pointing uncertainty to WCS

    Draws a radial offset |N(0, sigma)| and a uniform direction, then
    shifts the reference coordinates (CRVAL1/CRVAL2) in the header.

    ------ INPUT ------
    sigma       pointing accuracy (deg)
    ------ OUTPUT ------
    d_ro        radial offset actually applied (deg)
    d_phi       direction of the offset (rad)
    '''
    d_ro = abs(np.random.normal(0., sigma)) # N(0,sigma)
    d_phi = np.random.random() *2. * np.pi # U(0,2pi)
    dx = d_ro * np.cos(d_phi)
    dy = d_ro * np.sin(d_phi)
    self.hdr['CRVAL1'] += dx
    self.hdr['CRVAL2'] += dy
    return d_ro, d_phi
def spec_build(self, filOUT=None, write_unc=True, Ny=32, Nsub=1, sig_pt=0.):
'''
Build the spectral cube/slit from spectra extracted by IDL pipeline
(see IRC_SPEC_TOOL, plot_spec_with_image)
------ INPUT ------
Ny number of pixels in spatial direction (Max=32)
Y axis in N3 frame (X axis in focal plane arrays)
Nsub number of subslits
'''
Nx = self.slit_width
ref_x = self.table['image_y'][0] # slit ref x
ref_y = 512 - self.table['image_x'][0] # slit ref y
## Get slit coord from 2MASS corrected N3 frame
## Do NOT touch self.im (N3 frame, 2D) before this step
self.crop(sizpix=(Nx, Ny), cenpix=(ref_x, ref_y))
# self.hdr['CTYPE3'] = 'WAVE-TAB'
self.hdr['CUNIT1'] = 'deg'
self.hdr['CUNIT2'] = 'deg'
self.hdr['BUNIT'] = 'MJy/sr'
self.hdr['EQUINOX'] = 2000.0
## Add pointing unc
self.rand_pointing(sig_pt)
## Read spec
spec_arr = []
for i in range(Ny):
## Ny/Nsub should be integer, or there will be shift
ispec = math.floor(i / (math.ceil(Ny/Nsub)))
# spec_arr.append(read_ascii(self.path+'Y12spec'+str(ispec), '.spc', float))
spec_arr.append(ascii.read(self.path+'Y12spec'+str(ispec)+'.spc'))
spec_arr = np.array(spec_arr)
Nw = len(spec_arr[0,:,0])
## Broaden cube width
cube = | np.empty([Nw,Ny,Nx]) | numpy.empty |
import pyinduct as pi
import numpy as np
import sympy as sp
import time
import os
import pyqtgraph as pg
import matplotlib.pyplot as plt
from pyinduct.visualization import PgDataPlot, get_colors
# matplotlib configuration: render all figure text with LaTeX
plt.rcParams.update({'text.usetex': True})
def pprint(expression="\n\n\n"):
    """Pretty-print a sympy expression; numpy arrays are shown as a Matrix."""
    expr = sp.Matrix(expression) if isinstance(expression, np.ndarray) else expression
    sp.pprint(expr, num_columns=180)
def get_primal_eigenvector(according_paper=False):
    """Return the primal eigenvector phi(z) = expm(A*z) @ phi0 of the system.

    according_paper -- if True, use the closed-form matrix exponential;
        otherwise let sympy compute exp(A*z) symbolically.
        NOTE(review): both branches are presumably analytically equivalent
        -- confirm against the referenced derivation.

    Relies on the module-level symbol container `sym`
    (fields a, b, c, d, lam, z).
    """
    if according_paper:
        # some condensed parameters
        alpha = beta = sym.c / 2
        tau0 = 1 / sp.sqrt(sym.a * sym.b)
        w = tau0 * sp.sqrt((sym.lam + alpha) ** 2 - beta ** 2)

        # matrix exponential in closed form
        expm_A = sp.Matrix([
            [sp.cosh(w * sym.z),
             (sym.lam + sym.c) / sym.b / w * sp.sinh(w * sym.z)],
            [sym.lam / sym.a / w * sp.sinh(w * sym.z),
             sp.cosh(w * sym.z)]
        ])
    else:
        # system matrix
        A = sp.Matrix([[sp.Float(0), (sym.lam + sym.c) / sym.b],
                       [sym.lam/sym.a, sp.Float(0)]])

        # matrix exponential computed symbolically by sympy
        expm_A = sp.exp(A * sym.z)

    # initial values at z=0 (scaled by xi(s))
    phi0 = sp.Matrix([[sp.Float(1)], [sym.lam / sym.d]])

    # solution
    phi = expm_A * phi0

    return phi
def plot_eigenvalues(eigenvalues, return_figure=False):
plt.figure(facecolor="white")
plt.scatter(np.real(eigenvalues), | np.imag(eigenvalues) | numpy.imag |
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
"""
Defines a set of interfaces for the input and output of TVB.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from tvb.simulator.monitors import Raw, NArray, Float
from tvb.simulator.history import NDArray,Dim
import numpy
class Interface_co_simulation(Raw):
id_proxy = NArray(
    # numpy.int was a deprecated alias of the builtin int and was removed
    # in NumPy 1.24; numpy.int_ is the supported replacement dtype.
    dtype=numpy.int_,
    label="Identifier of proxies",
    )
time_synchronize = Float(
    label="simulated time between receiving the value of the proxy",
    )
def __init__(self, **kwargs):
    # No extra state here; all traited configuration is handled by the
    # Raw base monitor.
    super(Interface_co_simulation, self).__init__(**kwargs)
def config_for_sim(self, simulator):
# configuration of all monitor
super(Interface_co_simulation, self).config_for_sim(simulator)
# add some internal variable
self._id_node = \
numpy.where(numpy.logical_not(numpy.isin(numpy.arange(0, simulator.number_of_nodes, 1), self.id_proxy)))[0]
self._nb_step_time = | numpy.int(self.time_synchronize / simulator.integrator.dt) | numpy.int |
"""Unit tests for pair module."""
import json
import tempfile
import unittest
import numpy
import relentless
class test_PairParameters(unittest.TestCase):
    """Unit tests for relentless.pair.PairParameters"""

    def _check_types_params(self, m, types, params, pairs):
        """Assert that m exposes the expected types, params, and pairs."""
        self.assertEqual(m.types, types)
        self.assertEqual(m.params, params)
        self.assertCountEqual(m.pairs, pairs)

    def test_init(self):
        """Test creation from data"""
        types = ('A','B')
        pairs = (('A','B'), ('B','B'), ('A','A'))
        params = ('energy', 'mass')

        # tuple, list, and mixed tuple/list inputs must all normalize to tuples
        for t, p in [(('A','B'), ('energy','mass')),
                     (['A','B'], ('energy','mass')),
                     (('A','B'), ['energy','mass'])]:
            m = relentless.potential.PairParameters(types=t, params=p)
            self._check_types_params(m, types, params, pairs)

        # non-string parameters are rejected
        with self.assertRaises(TypeError):
            m = relentless.potential.PairParameters(types=('A','B'), params=(1,2))
        # mixed string/non-string parameters are rejected
        with self.assertRaises(TypeError):
            m = relentless.potential.PairParameters(types=('A','B'), params=('1',2))

    def _check_values(self, m, shared, pair_vals, type_vals):
        """Assert shared, per-pair, and per-type parameter values.

        shared    : dict of expected shared values
        pair_vals : dict mapping pair tuple -> dict of expected values
        type_vals : dict mapping type -> dict of expected values
        """
        for key, val in shared.items():
            self.assertEqual(m.shared[key], val)
        for pair, vals in pair_vals.items():
            for key, val in vals.items():
                self.assertEqual(m[pair][key], val)
        for typ, vals in type_vals.items():
            for key, val in vals.items():
                self.assertEqual(m[typ][key], val)

    def test_param_types(self):
        """Test various get and set methods on pair parameter types"""
        m = relentless.potential.PairParameters(types=('A','B'), params=('energy', 'mass'))

        unset = {'energy': None, 'mass': None}
        # everything starts unset
        self._check_values(m,
                           shared=unset,
                           pair_vals={('A','A'): unset, ('A','B'): unset, ('B','B'): unset},
                           type_vals={'A': unset, 'B': unset})

        # setting shared params must not touch per-pair or per-type values
        m.shared.update(energy=1.0, mass=2.0)
        self._check_values(m,
                           shared={'energy': 1.0, 'mass': 2.0},
                           pair_vals={('A','A'): unset, ('A','B'): unset, ('B','B'): unset},
                           type_vals={'A': unset, 'B': unset})

        # setting per-pair params must not touch shared or per-type values
        m['A','A'].update(energy=1.5, mass=2.5)
        m['A','B'].update(energy=2.0, mass=3.0)
        m['B','B'].update(energy=0.5, mass=0.7)
        self._check_values(m,
                           shared={'energy': 1.0, 'mass': 2.0},
                           pair_vals={('A','A'): {'energy': 1.5, 'mass': 2.5},
                                      ('A','B'): {'energy': 2.0, 'mass': 3.0},
                                      ('B','B'): {'energy': 0.5, 'mass': 0.7}},
                           type_vals={'A': unset, 'B': unset})

        # setting per-type params must not touch shared or per-pair values
        m['A'].update(energy=0.1, mass=0.2)
        m['B'].update(energy=0.2, mass=0.1)
        self._check_values(m,
                           shared={'energy': 1.0, 'mass': 2.0},
                           pair_vals={('A','A'): {'energy': 1.5, 'mass': 2.5},
                                      ('A','B'): {'energy': 2.0, 'mass': 3.0},
                                      ('B','B'): {'energy': 0.5, 'mass': 0.7}},
                           type_vals={'A': {'energy': 0.1, 'mass': 0.2},
                                      'B': {'energy': 0.2, 'mass': 0.1}})
class LinPot(relentless.potential.PairPotential):
    """Linear potential u(r) = m*r used to exercise relentless.potential.PairPotential."""
    def __init__(self, types, params):
        super().__init__(types, params)

    def _energy(self, r, m, **params):
        # u(r) = m*r
        r, u, scalar = self._zeros(r)
        u[:] = m*r
        return u.item() if scalar else u

    def _force(self, r, m, **params):
        # f(r) = -du/dr = -m
        r, f, scalar = self._zeros(r)
        f[:] = -m
        return f.item() if scalar else f

    def _derivative(self, param, r, **params):
        # du/dm = r; derivative w.r.t. any other name is zero
        r, d, scalar = self._zeros(r)
        if param == 'm':
            d[:] = r
        return d.item() if scalar else d
class TwoVarPot(relentless.potential.PairPotential):
    """Mock potential used to test relentless.potential.PairPotential.derivative."""
    def __init__(self, types, params):
        super().__init__(types, params)

    def _energy(self, r, x, y, **params):
        pass

    def _force(self, r, x, y, **params):
        pass

    def _derivative(self, param, r, **params):
        # not a real derivative, just used to test functionality
        r, d, scalar = self._zeros(r)
        factor = {'x': 2, 'y': 3}.get(param)
        if factor is not None:
            d[:] = factor*r
        return d.item() if scalar else d
class test_PairPotential(unittest.TestCase):
    """Unit tests for relentless.potential.PairPotential"""

    def _check_coeff(self, p, m, rmin, rmax, shift):
        """Assert that the coefficient matrix of p matches the given values."""
        coeff = relentless.potential.PairParameters(types=('1',),
                                                    params=('m','rmin','rmax','shift'))
        coeff['1','1']['m'] = m
        coeff['1','1']['rmin'] = rmin
        coeff['1','1']['rmax'] = rmax
        coeff['1','1']['shift'] = shift
        self.assertCountEqual(p.coeff.types, coeff.types)
        self.assertCountEqual(p.coeff.params, coeff.params)
        self.assertDictEqual(p.coeff.evaluate(('1','1')), coeff.evaluate(('1','1')))

    def test_init(self):
        """Test creation from data"""
        # only m: cutoffs and shift must default to False
        p = LinPot(types=('1',), params=('m',))
        p.coeff['1','1']['m'] = 3.5
        self._check_coeff(p, m=3.5, rmin=False, rmax=False, shift=False)

        # m and rmin
        p = LinPot(types=('1',), params=('m','rmin'))
        p.coeff['1','1']['m'] = 3.5
        p.coeff['1','1']['rmin'] = 0.0
        self._check_coeff(p, m=3.5, rmin=0.0, rmax=False, shift=False)

        # m and rmax
        p = LinPot(types=('1',), params=('m','rmax'))
        p.coeff['1','1']['m'] = 3.5
        p.coeff['1','1']['rmax'] = 1.0
        self._check_coeff(p, m=3.5, rmin=False, rmax=1.0, shift=False)

        # m and shift
        p = LinPot(types=('1',), params=('m','shift'))
        p.coeff['1','1']['m'] = 3.5
        p.coeff['1','1']['shift'] = True
        self._check_coeff(p, m=3.5, rmin=False, rmax=False, shift=True)

        # all params
        p = LinPot(types=('1',), params=('m','rmin','rmax','shift'))
        p.coeff['1','1']['m'] = 3.5
        p.coeff['1','1']['rmin'] = 0.0
        p.coeff['1','1']['rmax'] = 1.0
        p.coeff['1','1']['shift'] = True
        self._check_coeff(p, m=3.5, rmin=0.0, rmax=1.0, shift=True)

    def test_energy(self):
        """Test energy method"""
        p = LinPot(types=('1',), params=('m',))
        p.coeff['1','1']['m'] = 2.0

        # no cutoffs
        u = p.energy(pair=('1','1'), r=0.5)
        self.assertAlmostEqual(u, 1.0)
        u = p.energy(pair=('1','1'), r=[0.25,0.75])
        numpy.testing.assert_allclose(u, [0.5,1.5])

        # rmin set: energy below the cutoff is clamped at u(rmin)
        p.coeff['1','1']['rmin'] = 0.5
        u = p.energy(pair=('1','1'), r=0.6)
        self.assertAlmostEqual(u, 1.2)
        u = p.energy(pair=('1','1'), r=[0.25,0.75])
        numpy.testing.assert_allclose(u, [1.0,1.5])

        # rmax set: energy above the cutoff is clamped at u(rmax)
        p.coeff['1','1'].update(rmin=False, rmax=1.5)
        u = p.energy(pair=('1','1'), r=1.0)
        self.assertAlmostEqual(u, 2.0)
        u = p.energy(pair=('1','1'), r=[0.25,1.75])
        numpy.testing.assert_allclose(u, [0.5,3.0])

        # rmin and rmax set
        p.coeff['1','1']['rmin'] = 0.5
        u = p.energy(pair=('1','1'), r=0.75)
        self.assertAlmostEqual(u, 1.5)
        u = p.energy(pair=('1','1'), r=[0.25,0.5,1.5,1.75])
        numpy.testing.assert_allclose(u, [1.0,1.0,3.0,3.0])

        # shift set: u(rmax) is subtracted everywhere
        p.coeff['1','1'].update(shift=True)
        u = p.energy(pair=('1','1'), r=0.5)
        self.assertAlmostEqual(u, -2.0)
        u = p.energy(pair=('1','1'), r=[0.25,0.75,1.0,1.5])
        numpy.testing.assert_allclose(u, [-2.0,-1.5,-1.0,0.0])

        # shifting without rmax is an error
        p.coeff['1','1'].update(rmax=False)
        with self.assertRaises(ValueError):
            u = p.energy(pair=('1','1'), r=0.5)

    def test_force(self):
        """Test force method"""
        p = LinPot(types=('1',), params=('m',))
        p.coeff['1','1']['m'] = 2.0

        # no cutoffs
        f = p.force(pair=('1','1'), r=0.5)
        self.assertAlmostEqual(f, -2.0)
        f = p.force(pair=('1','1'), r=[0.25,0.75])
        numpy.testing.assert_allclose(f, [-2.0,-2.0])

        # rmin set: force vanishes below the cutoff
        p.coeff['1','1']['rmin'] = 0.5
        f = p.force(pair=('1','1'), r=0.6)
        self.assertAlmostEqual(f, -2.0)
        f = p.force(pair=('1','1'), r=[0.25,0.75])
        numpy.testing.assert_allclose(f, [0.0,-2.0])

        # rmax set: force vanishes above the cutoff
        p.coeff['1','1'].update(rmin=False, rmax=1.5)
        f = p.force(pair=('1','1'), r=1.0)
        self.assertAlmostEqual(f, -2.0)
        f = p.force(pair=('1','1'), r=[0.25,1.75])
        numpy.testing.assert_allclose(f, [-2.0,0.0])

        # rmin and rmax set
        p.coeff['1','1']['rmin'] = 0.5
        f = p.force(pair=('1','1'), r=0.75)
        self.assertAlmostEqual(f, -2.0)
        f = p.force(pair=('1','1'), r=[0.25,0.5,1.5,1.75])
        numpy.testing.assert_allclose(f, [0.0,-2.0,-2.0,0.0])

        # shift set: shifting does not change the force
        p.coeff['1','1'].update(shift=True)
        f = p.force(pair=('1','1'), r=0.5)
        self.assertAlmostEqual(f, -2.0)
        f = p.force(pair=('1','1'), r=[1.0,1.5])
        numpy.testing.assert_allclose(f, [-2.0,-2.0])

    def test_derivative_values(self):
        """Test derivative method with different param values"""
        p = LinPot(types=('1',), params=('m',))
        x = relentless.variable.DesignVariable(value=2.0)
        p.coeff['1','1']['m'] = x

        # no cutoffs: du/dm = r
        d = p.derivative(pair=('1','1'), var=x, r=0.5)
        self.assertAlmostEqual(d, 0.5)
        d = p.derivative(pair=('1','1'), var=x, r=[0.25,0.75])
        numpy.testing.assert_allclose(d, [0.25,0.75])

        # rmin set: derivative is clamped below the cutoff
        rmin = relentless.variable.DesignVariable(value=0.5)
        p.coeff['1','1']['rmin'] = rmin
        d = p.derivative(pair=('1','1'), var=x, r=0.6)
        self.assertAlmostEqual(d, 0.6)
        d = p.derivative(pair=('1','1'), var=x, r=[0.25,0.75])
        numpy.testing.assert_allclose(d, [0.5,0.75])

        # rmax set: derivative is clamped above the cutoff
        rmax = relentless.variable.DesignVariable(value=1.5)
        p.coeff['1','1'].update(rmin=False, rmax=rmax)
        d = p.derivative(pair=('1','1'), var=x, r=1.0)
        self.assertAlmostEqual(d, 1.0)
        d = p.derivative(pair=('1','1'), var=x, r=[0.25,1.75])
        numpy.testing.assert_allclose(d, [0.25,1.5])

        # rmin and rmax set
        p.coeff['1','1']['rmin'] = rmin
        d = p.derivative(pair=('1','1'), var=x, r=0.75)
        self.assertAlmostEqual(d, 0.75)
        d = p.derivative(pair=('1','1'), var=x, r=[0.25,0.5,1.5,1.75])
        numpy.testing.assert_allclose(d, [0.5,0.5,1.5,1.5])

        # w.r.t. rmin and rmax
        d = p.derivative(pair=('1','1'), var=rmin, r=[0.25,1.0,2.0])
        numpy.testing.assert_allclose(d, [2.0,0.0,0.0])
        d = p.derivative(pair=('1','1'), var=rmax, r=[0.25,1.0,2.0])
        numpy.testing.assert_allclose(d, [0.0,0.0,2.0])

        # parameter derivative with shift set
        p.coeff['1','1'].update(shift=True)
        d = p.derivative(pair=('1','1'), var=x, r=0.5)
        self.assertAlmostEqual(d, -1.0)
        d = p.derivative(pair=('1','1'), var=x, r=[0.25,1.0,1.5,1.75])
        numpy.testing.assert_allclose(d, [-1.0,-0.5,0.0,0.0])

        # w.r.t. rmin and rmax, shift set
        d = p.derivative(pair=('1','1'), var=rmin, r=[0.25,1.0,2.0])
        numpy.testing.assert_allclose(d, [2.0,0.0,0.0])
        d = p.derivative(pair=('1','1'), var=rmax, r=[0.25,1.0,2.0])
        numpy.testing.assert_allclose(d, [-2.0,-2.0,0.0])

    def test_derivative_types(self):
        """Test derivative method with different param types."""
        q = LinPot(types=('1',), params=('m',))
        x = relentless.variable.DesignVariable(value=4.0)
        y = relentless.variable.DesignVariable(value=64.0)
        z = relentless.variable.GeometricMean(x, y)
        q.coeff['1','1']['m'] = z

        # with respect to dependent variable parameter
        d = q.derivative(pair=('1','1'), var=z, r=2.0)
        self.assertAlmostEqual(d, 2.0)

        # with respect to independent variable on which parameter is dependent
        d = q.derivative(pair=('1','1'), var=x, r=1.5)
        self.assertAlmostEqual(d, 3.0)
        d = q.derivative(pair=('1','1'), var=y, r=4.0)
        self.assertAlmostEqual(d, 0.5)

        # invalid derivative w.r.t. scalar
        a = 2.5
        q.coeff['1','1']['m'] = a
        with self.assertRaises(TypeError):
            d = q.derivative(pair=('1','1'), var=a, r=2.0)

        # with respect to independent variable related to a SameAs variable
        r = TwoVarPot(types=('1',), params=('x','y'))

        r.coeff['1','1']['x'] = x
        r.coeff['1','1']['y'] = relentless.variable.SameAs(x)
        d = r.derivative(pair=('1','1'), var=x, r=4.0)
        self.assertAlmostEqual(d, 20.0)

        r.coeff['1','1']['y'] = x
        r.coeff['1','1']['x'] = relentless.variable.SameAs(x)
        d = r.derivative(pair=('1','1'), var=x, r=4.0)
        self.assertAlmostEqual(d, 20.0)

    def test_iteration(self):
        """Test iteration on PairPotential object"""
        p = LinPot(types=('1','2'), params=('m',))
        for pair in p.coeff:
            p.coeff[pair]['m'] = 2.0
            p.coeff[pair]['rmin'] = 0.0
            p.coeff[pair]['rmax'] = 1.0

        # every pair must carry the values set through iteration
        self.assertDictEqual(p.coeff['1','1'].todict(), {'m':2.0, 'rmin':0.0, 'rmax':1.0, 'shift':False})
        self.assertDictEqual(p.coeff['1','2'].todict(), {'m':2.0, 'rmin':0.0, 'rmax':1.0, 'shift':False})
        self.assertDictEqual(p.coeff['2','2'].todict(), {'m':2.0, 'rmin':0.0, 'rmax':1.0, 'shift':False})

    def test_save(self):
        """Test saving to file"""
        # NOTE(review): reopening a NamedTemporaryFile by name is POSIX-only;
        # this test would need mkstemp/TemporaryDirectory to run on Windows.
        temp = tempfile.NamedTemporaryFile()
        p = LinPot(types=('1',), params=('m','rmin','rmax'))
        p.coeff['1','1']['m'] = 2.0
        p.coeff['1','1']['rmin'] = 0.0
        p.coeff['1','1']['rmax'] = 1.0
        p.coeff['1','1']['shift'] = True

        p.save(temp.name)
        with open(temp.name, 'r') as f:
            x = json.load(f)

        # round-trip: the JSON on disk must reflect the live coefficients
        self.assertEqual(p.coeff['1','1']['m'], x["('1', '1')"]['m'])
        self.assertEqual(p.coeff['1','1']['rmin'], x["('1', '1')"]['rmin'])
        self.assertEqual(p.coeff['1','1']['rmax'], x["('1', '1')"]['rmax'])
        self.assertEqual(p.coeff['1','1']['shift'], x["('1', '1')"]['shift'])

        temp.close()
class test_LennardJones(unittest.TestCase):
    """Unit tests for relentless.potential.LennardJones"""

    def test_init(self):
        """Test creation from data"""
        lj = relentless.potential.LennardJones(types=('1',))
        ref = relentless.potential.PairParameters(types=('1',),
                                                  params=('epsilon','sigma','rmin','rmax','shift'))
        for pair in ref.pairs:
            ref[pair]['rmin'] = False
            ref[pair]['rmax'] = False
            ref[pair]['shift'] = False
        self.assertCountEqual(lj.coeff.types, ref.types)
        self.assertCountEqual(lj.coeff.params, ref.params)

    def test_energy(self):
        """Test _energy method"""
        lj = relentless.potential.LennardJones(types=('1',))
        # scalar r (the energy vanishes at r = sigma)
        self.assertAlmostEqual(lj._energy(r=0.5, epsilon=1.0, sigma=0.5), 0)
        # array r (diverges at r = 0)
        rs = numpy.array([0,1,1.5])
        numpy.testing.assert_allclose(
            lj._energy(r=rs, epsilon=1.0, sigma=0.5),
            numpy.array([numpy.inf,-0.061523438,-0.0054794417]))
        # negative sigma is rejected
        with self.assertRaises(ValueError):
            lj._energy(r=rs, epsilon=1.0, sigma=-1.0)

    def test_force(self):
        """Test _force method"""
        lj = relentless.potential.LennardJones(types=('1',))
        # scalar r
        self.assertAlmostEqual(lj._force(r=0.5, epsilon=1.0, sigma=0.5), 48)
        # array r
        rs = numpy.array([0,1,1.5])
        numpy.testing.assert_allclose(
            lj._force(r=rs, epsilon=1.0, sigma=0.5),
            numpy.array([numpy.inf,-0.36328125,-0.02188766]))
        # negative sigma is rejected
        with self.assertRaises(ValueError):
            lj._force(r=rs, epsilon=1.0, sigma=-1.0)

    def test_derivative(self):
        """Test _derivative method"""
        lj = relentless.potential.LennardJones(types=('1',))
        rs = numpy.array([0,1,1.5])

        # w.r.t. epsilon: scalar then array r
        self.assertAlmostEqual(
            lj._derivative(param='epsilon', r=0.5, epsilon=1.0, sigma=0.5), 0)
        numpy.testing.assert_allclose(
            lj._derivative(param='epsilon', r=rs, epsilon=1.0, sigma=0.5),
            numpy.array([numpy.inf,-0.061523438,-0.0054794417]))

        # w.r.t. sigma: scalar then array r
        self.assertAlmostEqual(
            lj._derivative(param='sigma', r=0.5, epsilon=1.0, sigma=0.5), 48)
        numpy.testing.assert_allclose(
            lj._derivative(param='sigma', r=rs, epsilon=1.0, sigma=0.5),
            numpy.array([numpy.inf,-0.7265625,-0.06566298]))

        # negative sigma is rejected
        with self.assertRaises(ValueError):
            lj._derivative(param='sigma', r=rs, epsilon=1.0, sigma=-1.0)
        # misspelled parameter name is rejected
        with self.assertRaises(ValueError):
            lj._derivative(param='simga', r=rs, epsilon=1.0, sigma=1.0)
class test_PairSpline(unittest.TestCase):
    """Unit tests for relentless.potential.PairSpline"""

    def test_init(self):
        """Test creation from data"""
        expected = relentless.potential.PairParameters(
            types=('1',),
            params=('r-0','r-1','r-2','knot-0','knot-1','knot-2','rmin','rmax','shift'))

        # diff mode (the default)
        s = relentless.potential.PairSpline(types=('1',), num_knots=3)
        self.assertEqual(s.num_knots, 3)
        self.assertEqual(s.mode, 'diff')
        self.assertCountEqual(s.coeff.types, expected.types)
        self.assertCountEqual(s.coeff.params, expected.params)

        # value mode
        s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode='value')
        self.assertEqual(s.num_knots, 3)
        self.assertEqual(s.mode, 'value')
        self.assertCountEqual(s.coeff.types, expected.types)
        self.assertCountEqual(s.coeff.params, expected.params)

        # too few knots is rejected
        with self.assertRaises(ValueError):
            relentless.potential.PairSpline(types=('1',), num_knots=1)
        # misspelled mode is rejected
        with self.assertRaises(ValueError):
            relentless.potential.PairSpline(types=('1',), num_knots=3, mode='val')

    def test_from_array(self):
        """Test from_array method and knots generator"""
        r_arr = [1,2,3]
        u_arr = [9,4,1]
        u_arr_diff = [5,3,1]

        # knot values stored depend on the mode (differences vs raw values)
        for mode, knot_vals in (('diff', u_arr_diff), ('value', u_arr)):
            s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode=mode)
            s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
            for i, (r, k) in enumerate(s.knots(pair=('1','1'))):
                self.assertAlmostEqual(r.value, r_arr[i])
                self.assertAlmostEqual(k.value, knot_vals[i])
                # r knots are always constant; only the last u knot is
                self.assertEqual(r.const, True)
                self.assertEqual(k.const, i == s.num_knots-1)

        # mismatched r/u lengths are rejected
        with self.assertRaises(ValueError):
            s.from_array(pair=('1','1'), r=[2,3], u=u_arr)
        with self.assertRaises(ValueError):
            s.from_array(pair=('1','1'), r=[1,2,3], u=[1,2])

    def test_energy(self):
        """Test energy method"""
        r_arr = [1,2,3]
        u_arr = [9,4,1]
        u_expected = numpy.array([6.25,2.25,1])

        # both modes must reproduce the same interpolated energies
        for mode in ('diff', 'value'):
            s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode=mode)
            s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
            numpy.testing.assert_allclose(
                s.energy(pair=('1','1'), r=[1.5,2.5,3.5]), u_expected)

        # minimal spline with only 2 knots
        s = relentless.potential.PairSpline(types=('1',), num_knots=2, mode='value')
        s.from_array(pair=('1','1'), r=[1,2], u=[4,2])
        self.assertAlmostEqual(s.energy(pair=('1','1'), r=1.5), 3)

    def test_force(self):
        """Test force method"""
        r_arr = [1,2,3]
        u_arr = [9,4,1]
        f_expected = numpy.array([5,3,0])

        # both modes must reproduce the same forces
        for mode in ('diff', 'value'):
            s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode=mode)
            s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
            numpy.testing.assert_allclose(
                s.force(pair=('1','1'), r=[1.5,2.5,3.5]), f_expected)

        # minimal spline with only 2 knots
        s = relentless.potential.PairSpline(types=('1',), num_knots=2, mode='value')
        s.from_array(pair=('1','1'), r=[1,2], u=[4,2])
        self.assertAlmostEqual(s.force(pair=('1','1'), r=1.5), 2)

    def test_derivative(self):
        """Test derivative method"""
        r_arr = [1,2,3]
        u_arr = [9,4,1]

        # the derivative w.r.t. the second knot differs between modes
        for mode, d_expected in (('diff', numpy.array([1.125,0.625,0])),
                                 ('value', numpy.array([0.75,0.75,0]))):
            s = relentless.potential.PairSpline(types=('1',), num_knots=3, mode=mode)
            s.from_array(pair=('1','1'), r=r_arr, u=u_arr)
            param = list(s.knots(('1','1')))[1][1]
            numpy.testing.assert_allclose(
                s.derivative(pair=('1','1'), var=param, r=[1.5,2.5,3.5]),
                d_expected)
class test_Yukawa(unittest.TestCase):
    """Unit tests for relentless.potential.Yukawa"""

    def test_init(self):
        """Test creation from data"""
        y = relentless.potential.Yukawa(types=('1',))
        ref = relentless.potential.PairParameters(
            types=('1',), params=('epsilon','kappa','rmin','rmax','shift'))
        for pair in ref.pairs:
            ref[pair]['rmin'] = False
            ref[pair]['rmax'] = False
            ref[pair]['shift'] = False
        self.assertCountEqual(y.coeff.types, ref.types)
        self.assertCountEqual(y.coeff.params, ref.params)

    def test_energy(self):
        """Test _energy method"""
        y = relentless.potential.Yukawa(types=('1',))
        # scalar r
        self.assertAlmostEqual(y._energy(r=0.5, epsilon=1.0, kappa=0.5), 1.5576016)
        # array r (diverges at r = 0)
        rs = numpy.array([0,1,1.5])
        numpy.testing.assert_allclose(
            y._energy(r=rs, epsilon=1.0, kappa=0.5),
            numpy.array([numpy.inf,0.60653066,0.31491104]))
        # negative kappa is rejected
        with self.assertRaises(ValueError):
            y._energy(r=rs, epsilon=1.0, kappa=-1.0)

    def test_force(self):
        """Test _force method"""
        y = relentless.potential.Yukawa(types=('1',))
        # scalar r
        self.assertAlmostEqual(y._force(r=0.5, epsilon=1.0, kappa=0.5), 3.8940039)
        # array r
        rs = numpy.array([0,1,1.5])
        numpy.testing.assert_allclose(
            y._force(r=rs, epsilon=1.0, kappa=0.5),
            numpy.array([numpy.inf,0.90979599,0.36739621]))
        # negative kappa is rejected
        with self.assertRaises(ValueError):
            y._force(r=rs, epsilon=1.0, kappa=-1.0)

    def test_derivative(self):
        """Test _derivative method"""
        y = relentless.potential.Yukawa(types=('1',))
        rs = numpy.array([0,1,1.5])

        # w.r.t. epsilon: scalar then array r
        self.assertAlmostEqual(
            y._derivative(param='epsilon', r=0.5, epsilon=1.0, kappa=0.5), 1.5576016)
        numpy.testing.assert_allclose(
            y._derivative(param='epsilon', r=rs, epsilon=1.0, kappa=0.5),
            numpy.array([numpy.inf,0.60653066,0.31491104]))

        # w.r.t. kappa: scalar then array r
        self.assertAlmostEqual(
            y._derivative(param='kappa', r=0.5, epsilon=1.0, kappa=0.5), -0.77880078)
        numpy.testing.assert_allclose(
            y._derivative(param='kappa', r=rs, epsilon=1.0, kappa=0.5),
            numpy.array([-1,-0.60653066,-0.47236655]))

        # negative kappa is rejected
        with self.assertRaises(ValueError):
            y._derivative(param='kappa', r=rs, epsilon=1.0, kappa=-1.0)
        # misspelled parameter name is rejected
        with self.assertRaises(ValueError):
            y._derivative(param='kapppa', r=rs, epsilon=1.0, kappa=1.0)
class test_Depletion(unittest.TestCase):
"""Unit tests for relentless.potential.Depletion"""
def test_init(self):
"""Test creation from data"""
dp = relentless.potential.Depletion(types=('1','2'))
coeff = relentless.potential.PairParameters(types=('1','2'),
params=('P','sigma_i','sigma_j','sigma_d','rmin','rmax','shift'))
self.assertCountEqual(dp.coeff.types, coeff.types)
self.assertCountEqual(dp.coeff.params, coeff.params)
    def test_cutoff_init(self):
        """Test creation of Depletion.Cutoff from data.

        The cutoff value is (sigma_i + sigma_j)/2 + sigma_d as evidenced by
        the expected values below; both scalar-backed and DesignVariable-backed
        constructions are exercised, and the cutoff must track later changes
        to its parameters.
        """
        #create object dependent on scalars
        w = relentless.potential.Depletion.Cutoff(sigma_i=1.0, sigma_j=2.0, sigma_d=0.25)
        self.assertAlmostEqual(w.value, 1.75)
        self.assertCountEqual(w.params, ('sigma_i','sigma_j','sigma_d'))
        self.assertDictEqual({p:v.value for p,v in w.depends},
                             {'sigma_i':1.0, 'sigma_j':2.0, 'sigma_d':0.25})
        #change parameter value; cutoff must update
        w.sigma_j.value = 4.0
        self.assertAlmostEqual(w.value, 2.75)
        self.assertCountEqual(w.params, ('sigma_i','sigma_j','sigma_d'))
        self.assertDictEqual({p:v.value for p,v in w.depends},
                             {'sigma_i':1.0, 'sigma_j':4.0, 'sigma_d':0.25})
        #create object dependent on variables
        a = relentless.variable.DesignVariable(value=1.0)
        b = relentless.variable.DesignVariable(value=2.0)
        c = relentless.variable.DesignVariable(value=0.25)
        w = relentless.potential.Depletion.Cutoff(sigma_i=a, sigma_j=b, sigma_d=c)
        self.assertAlmostEqual(w.value, 1.75)
        self.assertCountEqual(w.params, ('sigma_i','sigma_j','sigma_d'))
        self.assertDictEqual({p:v for p,v in w.depends},
                             {'sigma_i':a, 'sigma_j':b, 'sigma_d':c})
        #change variable value; cutoff must update through the dependency
        b.value = 4.0
        self.assertAlmostEqual(w.value, 2.75)
        self.assertCountEqual(w.params, ('sigma_i','sigma_j','sigma_d'))
        self.assertDictEqual({p:v for p,v in w.depends},
                             {'sigma_i':a, 'sigma_j':b, 'sigma_d':c})
def test_cutoff_derivative(self):
"""Test Depletion.Cutoff._derivative method"""
w = relentless.potential.Depletion.Cutoff(sigma_i=1.0, sigma_j=2.0, sigma_d=0.25)
#calculate w.r.t. sigma_i
dw = w._derivative('sigma_i')
self.assertEqual(dw, 0.5)
#calculate w.r.t. sigma_j
dw = w._derivative('sigma_j')
self.assertEqual(dw, 0.5)
#calculate w.r.t. sigma_d
dw = w._derivative('sigma_d')
self.assertEqual(dw, 1.0)
#invalid parameter calculation
with self.assertRaises(ValueError):
dw = w._derivative('sigma')
    def test_energy(self):
        """Test _energy and energy methods.

        _energy is checked for scalar and array r, parameter validation, and
        the public energy() is checked to clamp outside [rmin,rmax] and to set
        the rmax coefficient from the Cutoff ((1.5+2)/2 + 2.5 = 4.25).
        """
        dp = relentless.potential.Depletion(types=('1',))
        #test scalar r
        r_input = 3
        u_actual = -4.6786414
        u = dp._energy(r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
        self.assertAlmostEqual(u, u_actual)
        #test array r (second point lies at the cutoff -> zero energy)
        r_input = numpy.array([1.75,4.25])
        u_actual = numpy.array([-16.59621119,0])
        u = dp._energy(r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
        numpy.testing.assert_allclose(u, u_actual)
        #test negative sigma: each diameter must be rejected individually
        with self.assertRaises(ValueError):
            u = dp._energy(r=r_input, P=1, sigma_i=-1, sigma_j=1, sigma_d=1)
        with self.assertRaises(ValueError):
            u = dp._energy(r=r_input, P=1, sigma_i=1, sigma_j=-1, sigma_d=1)
        with self.assertRaises(ValueError):
            u = dp._energy(r=r_input, P=1, sigma_i=1, sigma_j=1, sigma_d=-1)
        #test energy outside of low/high bounds
        dp.coeff['1','1'].update(P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
        r_input = numpy.array([1,5])
        u_actual = numpy.array([-25.7514468,0])
        u = dp.energy(pair=('1','1'), r=r_input)
        numpy.testing.assert_allclose(u, u_actual)
        # rmax must have been populated from the depletion cutoff
        self.assertAlmostEqual(dp.coeff['1','1']['rmax'].value, 4.25)
    def test_force(self):
        """Test _force and force methods.

        Mirrors test_energy: scalar/array r for _force, parameter validation,
        and the public force() evaluated outside [rmin,rmax] plus the
        auto-populated rmax coefficient.
        """
        dp = relentless.potential.Depletion(types=('1',))
        #test scalar r
        r_input = 3
        f_actual = -7.0682426
        f = dp._force(r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
        self.assertAlmostEqual(f, f_actual)
        #test array r (second point lies at the cutoff -> zero force)
        r_input = numpy.array([1.75,4.25])
        f_actual = numpy.array([-11.54054444,0])
        f = dp._force(r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
        numpy.testing.assert_allclose(f, f_actual)
        #test negative sigma: each diameter must be rejected individually
        with self.assertRaises(ValueError):
            f = dp._force(r=r_input, P=1, sigma_i=-1, sigma_j=1, sigma_d=1)
        with self.assertRaises(ValueError):
            f = dp._force(r=r_input, P=1, sigma_i=1, sigma_j=-1, sigma_d=1)
        with self.assertRaises(ValueError):
            f = dp._force(r=r_input, P=1, sigma_i=1, sigma_j=1, sigma_d=-1)
        #test force outside of low/high bounds
        dp.coeff['1','1'].update(P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
        r_input = numpy.array([1,5])
        f_actual = numpy.array([-12.5633027,0])
        f = dp.force(pair=('1','1'), r=r_input)
        numpy.testing.assert_allclose(f, f_actual)
        # rmax must have been populated from the depletion cutoff
        self.assertAlmostEqual(dp.coeff['1','1']['rmax'].value, 4.25)
def test_derivative(self):
"""Test _derivative and derivative methods"""
dp = relentless.potential.Depletion(types=('1',))
#w.r.t. P
#test scalar r
r_input = 3
d_actual = -4.6786414
d = dp._derivative(param='P', r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
self.assertAlmostEqual(d, d_actual)
#test array r
r_input = numpy.array([1.75,4.25])
d_actual = numpy.array([-16.59621119,0])
d = dp._derivative(param='P', r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
numpy.testing.assert_allclose(d, d_actual)
#w.r.t. sigma_i
#test scalar r
r_input = 3
d_actual = -4.25424005
d = dp._derivative(param='sigma_i', r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
self.assertAlmostEqual(d, d_actual)
#test array r
r_input = numpy.array([1.75,4.25])
d_actual = numpy.array([-8.975979,0])
d = dp._derivative(param='sigma_i', r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
numpy.testing.assert_allclose(d, d_actual)
#w.r.t. sigma_j
#test scalar r
r_input = 3
d_actual = -4.04970928
d = dp._derivative(param='sigma_j', r=r_input, P=1, sigma_i=1.5, sigma_j=2, sigma_d=2.5)
self.assertAlmostEqual(d, d_actual)
#test array r
r_input = | numpy.array([1.75,4.25]) | numpy.array |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
"""
import time as tm
import gc
import numpy as np
from numba import njit, guvectorize, complex128, void, prange
import sharedFunctions as shFncs
from beamformer import faverage # The benchmark (created with scipy.weave)
from beamformer_withoutMP import faverage_OhneMP
#%% Numba - njit
@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]))
def loops_Njit(csm, SpecAllChn):
    """Accumulate one ensemble into the cross spectral matrix, nopython-compiled.

    csm : (nFreqs, nMics, nMics) complex accumulator, updated in place.
    SpecAllChn : (nFreqs, nMics) complex spectra of one ensemble.
    Returns the same csm array for convenience.
    """
    nFreqs = csm.shape[0]
    nMics = csm.shape[1]
    for cntFreq in range(nFreqs):
        for cntRow in range(nMics):
            # conjugate once per row and reuse it across the inner loop
            temp = np.conj(SpecAllChn[cntFreq, cntRow])
            for cntColumn in range(nMics):
                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
    return csm
@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]), parallel=True)
def loops_Njit_Parallel(csm, SpecAllChn):
    """Same CSM accumulation as loops_Njit but compiled with parallel=True.

    NOTE(review): no prange is used here, so only numba's automatic
    parallelization (if any) applies; the explicit-prange variant is
    benchmarked separately as loops_Njit_Parallel_Prange.
    """
    nFreqs = csm.shape[0]
    nMics = csm.shape[1]
    for cntFreq in range(nFreqs):
        for cntRow in range(nMics):
            temp = np.conj(SpecAllChn[cntFreq, cntRow])
            for cntColumn in range(nMics):
                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
    return csm
@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]), parallel=True)
def loops_Njit_Parallel_Prange(csm, SpecAllChn):
    """CSM accumulation with the row loop parallelized via numba prange.

    Rows of each frequency's matrix are independent, so distributing cntRow
    across threads does not race on csm entries.
    """
    nFreqs = csm.shape[0]
    nMics = csm.shape[1]
    for cntFreq in range(nFreqs):
        for cntRow in prange(nMics):
            temp = np.conj(SpecAllChn[cntFreq, cntRow])
            for cntColumn in range(nMics):
                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
    return csm
#%% create CSM via complex transpose of lower triangular matrix
@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]))
def loopsComplexTranspose_Numpy(csm, SpecAllChn):
    """CSM accumulation via one np.outer per frequency instead of explicit mic loops.

    Adds conj(spec) (outer) spec to each frequency's matrix; csm is updated
    in place and returned.
    """
    nFreqs = csm.shape[0]
    for cntFreq in range(nFreqs):
        csm[cntFreq, :, :] += np.outer(np.conj(SpecAllChn[cntFreq, :]), SpecAllChn[cntFreq, :])
    return csm
@njit(complex128[:,:,:](complex128[:,:,:], complex128[:,:]))
def loopsOnlyTriangularMatrix_Njit(csm, SpecAllChn):
    """Accumulate only the strictly lower triangle of the CSM.

    One could build only the lower triangular csm and then, after averaging
    over all the ensembles, create the whole csm by complex transposing.
    One could maybe use sparse CSR/CSC matrices (even though the CSM is not
    too big, so advantages are maybe small).
    The diagonal and upper triangle of csm are left untouched here.
    """
    nFreqs = csm.shape[0]
    nMics = csm.shape[1]
    for cntFreq in range(nFreqs):
        for cntRow in range(nMics):
            temp = np.conj(SpecAllChn[cntFreq, cntRow])
            for cntColumn in range(cntRow): # only half of the operations in respect to 'loops_Njit'
                csm[cntFreq, cntRow, cntColumn] += temp * SpecAllChn[cntFreq, cntColumn]
    return csm
#%% Numba - guvectorize
# =============================================================================
# I don't think that parallelizing over the mics is in this case feasible.
# At least i can't think of a way to abstract the faverage procedure on one level below.
# It is however feasible to parallelize over the frequencies
# =============================================================================
@guvectorize([void(complex128[:,:], complex128[:], complex128[:,:])], '(m,m),(m)->(m,m)',
             nopython=True, target='cpu')
def loops_GuvectorizeOverFreqs_singleThreadedCPU(csm, SpecAllChn, result):
    """Per-frequency CSM update; guvectorize broadcasts over the leading
    frequency axis (single-threaded target).

    Unlike the njit variants this writes into a separate output array:
    result = csm + conj(spec) (outer) spec, fully populated.
    """
    nMics = csm.shape[0]
    for cntRow in range(nMics):
        temp = np.conj(SpecAllChn[cntRow])
        for cntColumn in range(nMics):
            result[cntRow, cntColumn] = csm[cntRow, cntColumn] + temp * SpecAllChn[cntColumn]
@guvectorize([void(complex128[:,:], complex128[:], complex128[:,:])], '(m,m),(m)->(m,m)',
             nopython=True, target='parallel')
def loops_GuvectorizeOverFreqs_multiThreadedCPU(csm, SpecAllChn, result):
    """Same per-frequency CSM update as the singleThreadedCPU variant, but
    with target='parallel' so frequencies are distributed across threads."""
    nMics = csm.shape[0]
    for cntRow in range(nMics):
        temp = np.conj(SpecAllChn[cntRow])
        for cntColumn in range(nMics):
            result[cntRow, cntColumn] = csm[cntRow, cntColumn] + temp * SpecAllChn[cntColumn]
@guvectorize([void(complex128[:,:], complex128[:], complex128[:,:])], '(m,m),(m)->(m,m)',
             nopython=True, target='cpu')
def loopsOnlyTriangularMatrix_GuvectorizeOverFreqs_singleThreadedCPU(csm, SpecAllChn, result):
    """Per-frequency update of only the strictly lower CSM triangle (single thread).

    Bug fix: `result` is a guvectorize *output* array and arrives
    uninitialized; the original wrote only the strictly lower triangle and
    left the diagonal and upper triangle holding garbage memory. Untouched
    entries are now carried over from `csm` so the whole output is defined.
    """
    nMics = csm.shape[0]
    for cntRow in range(nMics):
        temp = np.conj(SpecAllChn[cntRow])
        # accumulate the strictly lower triangle
        for cntColumn in range(cntRow):
            result[cntRow, cntColumn] = csm[cntRow, cntColumn] + temp * SpecAllChn[cntColumn]
        # pass the diagonal and upper triangle through unchanged
        for cntColumn in range(cntRow, nMics):
            result[cntRow, cntColumn] = csm[cntRow, cntColumn]
@guvectorize([void(complex128[:,:], complex128[:], complex128[:,:])], '(m,m),(m)->(m,m)',
             nopython=True, target='parallel')
def loopsOnlyTriangularMatrix_GuvectorizeOverFreqs_multiThreadedCPU(csm, SpecAllChn, result):
    """Per-frequency update of only the strictly lower CSM triangle (parallel target).

    Bug fix: `result` is a guvectorize *output* array and arrives
    uninitialized; the original wrote only the strictly lower triangle and
    left the diagonal and upper triangle holding garbage memory. Untouched
    entries are now carried over from `csm` so the whole output is defined.
    """
    nMics = csm.shape[0]
    for cntRow in range(nMics):
        temp = np.conj(SpecAllChn[cntRow])
        # accumulate the strictly lower triangle
        for cntColumn in range(cntRow):
            result[cntRow, cntColumn] = csm[cntRow, cntColumn] + temp * SpecAllChn[cntColumn]
        # pass the diagonal and upper triangle through unchanged
        for cntColumn in range(cntRow, nMics):
            result[cntRow, cntColumn] = csm[cntRow, cntColumn]
#%% MAIN
# Benchmark configuration: every (nMics, nFreqs) combination below is timed
# nTrials times per candidate implementation.
listOfMics = [500, 700, 1000] # default: 64
listOfNFreqs = [2**cnt for cnt in range(4, 11)] # default: 2048
nTrials = 10
#==============================================================================
# The benchmark function 'faverage' and also other implementations of
# the beamformer create a lot of overhead, which influences the computational
# effort of the succeeding function. This is mostly the case, if concurrent
# calculations are done (multiple cores). So often the first trial of a new
# function takes some time longer than the other trials.
#==============================================================================
#funcsToTrial = [loopsComplexTranspose_Numpy, loops_Njit, loops_Njit_Parallel, loops_Njit_Parallel_Prange,
#                loopsOnlyTriangularMatrix_Njit, loops_GuvectorizeOverFreqs_singleThreadedCPU,
#                loops_GuvectorizeOverFreqs_multiThreadedCPU,
#                loopsOnlyTriangularMatrix_GuvectorizeOverFreqs_singleThreadedCPU,
#                loopsOnlyTriangularMatrix_GuvectorizeOverFreqs_multiThreadedCPU,
#                faverage_OhneMP, faverage]
# Subset of implementations actually benchmarked in this run; 'faverage' must
# be included because it serves as the error reference below.
funcsToTrial = [loopsOnlyTriangularMatrix_Njit, faverage]
for nMics in listOfMics:
for nFreqs in listOfNFreqs:
# Init
print(10*'-' + 'New Test configuration: nMics=%s, nFreqs=%s' %(nMics, nFreqs) + 10*'-')
print(10*'-' + 'Creation of inputInputs' + 10*'-')
csm = np.zeros((nFreqs, nMics, nMics), np.complex128)
spectrumInput = np.random.rand(nFreqs, nMics) + \
1j*np.random.rand(nFreqs, nMics)
nameOfFuncsToTrial = map(lambda x: x.__name__, funcsToTrial)
nameOfFuncsForError = [funcName for funcName in nameOfFuncsToTrial if funcName != 'faverage']
maxRelativeDeviation = np.zeros((len(funcsToTrial), nTrials))
maxAbsoluteDeviation = np.zeros((len(funcsToTrial), nTrials))
timeConsumption = [[] for _ in range(len(funcsToTrial))]
indOfBaselineFnc = nameOfFuncsToTrial.index('faverage')
print(10*'-' + 'Onetime calculation of "faverage" for error reference' + 10*'-')
faverage(csm, spectrumInput)
resultReference = csm # For relative/absolute error
gc.collect()
# Testing
print(10*'-' + 'Testing of functions' + 10*'-')
cntFunc = 0
for func in funcsToTrial:
print(func.__name__)
for cntTrials in xrange(nTrials):
csm = np.zeros((nFreqs, nMics, nMics), np.complex128)
resultHelp = np.zeros((nFreqs, nMics, nMics), np.complex128)
if func.__name__ == 'faverage' or func.__name__ == 'faverage_OhneMP':
t0 = tm.time()
func(csm, spectrumInput)
t1 = tm.time()
result = csm
elif func.__name__ == 'loops_GuvectorizeOverFreqs':
t0 = tm.time()
func(csm, spectrumInput, resultHelp)
t1 = tm.time()
result = resultHelp
else:
t0 = tm.time()
output = func(csm, spectrumInput)
t1 = tm.time()
result = output
timeConsumption[cntFunc].append(t1 - t0)
relativeDiffBetweenNewCodeAndRef = (result - resultReference) / (result + resultReference) * 2 # error in relation to the resulting value
maxRelativeDeviation[cntFunc, cntTrials] = np.amax(np.amax(np.amax(abs(relativeDiffBetweenNewCodeAndRef), axis=0), axis=0), axis=0) + 10.0**-20 # relative error in inf-norm
maxAbsoluteDeviation[cntFunc, cntTrials] = np.amax(np.amax(np.amax(abs(result - resultReference), axis=0), axis=0), axis=0) + 10.0**-20 # absolute error in inf-norm
cntFunc += 1
factorTimeConsump = [ | np.mean(timeConsumption[cnt]) | numpy.mean |
import os
import math
import numpy as np
import tensorflow as tf
from enum import Enum
from time import time
from configparser import ConfigParser
from util.stats import DistributionInfo as Stat
# Project paths derived from this module's file location.
# NOTE(review): the replace() assumes this file lives at
# "Networks\q_learning.py" with Windows path separators; on POSIX the pattern
# will not match and PROJECT_ROOT keeps the full file path — TODO confirm and
# consider os.path/pathlib instead.
PROJECT_ROOT = str(__file__).replace("Networks\\q_learning.py", "")
TEMP_DIR = PROJECT_ROOT + "util/temp/"  # scratch CSV dumps (see ReplayMemory.write)
LOG_DIR = PROJECT_ROOT + "util/logs/"  # log / tensorboard output
DEFAULT_SAVE_PATH = PROJECT_ROOT + "Networks/saved/"  # default checkpoint directory
class NeuralNetwork:
    """Fully-connected TF1 Q-value network.

    The architecture is mirrored into a ConfigParser (`net_config`) while
    layers are added, so a net can be saved to and rebuilt from disk
    (see save()/restore()). Layers are stacked onto `self.output` until
    commit() appends the output layer and builds loss/optimizer/session.
    """
    def __init__(self, name, input_shape, n_classes, save_path=DEFAULT_SAVE_PATH):
        """Create placeholders and an empty, uncommitted layer stack.

        name: identifier used for the save directory and checkpoint files.
        input_shape: shape of one input sample (without the batch dimension).
        n_classes: number of output q-values (one per action).
        save_path: directory that save() writes net.cfg and checkpoints to.
        """
        self.net_config = ConfigParser()
        self.net_config["Format"] = {"input_shape": str(input_shape).rstrip("]").lstrip("["), "n_classes": str(n_classes), "n_layers": 0}
        self.net_config["Options"] = {"save_path": save_path}
        self.net_config["Stats"] = {"total_steps": 0, "total_time": 0}
        self.name = name
        self.n_classes = n_classes
        self.save_path = save_path
        # network inputs: states, learning rate and target q-values
        self.x = tf.placeholder(dtype=tf.float32, shape=[None] + input_shape, name="x")
        self.learning_rate = tf.placeholder(dtype=tf.float32, shape=[])
        self.q_values_new = tf.placeholder(tf.float32, shape=[None, n_classes], name='q_values_new')
        # `output` tracks the top of the layer stack; starts at the input
        self.output = self.x
        self.loss = None
        self.session = None
        self.saver = None
        self.optimizer = None
        self.merged_summary = None
        # todo adding file_writer produces error: "TypeError: Expected an event_pb2.Event proto, but got <class 'tensorflow.core.util.event_pb2.Event'>"
        # self.file_writer = tf.summary.FileWriter(LOG_DIR + self.name + "/tb")
        self.committed = False
        self.n_layers = 0
        self.tf_config = tf.ConfigProto()
        self.tf_config.gpu_options.allow_growth = True
    def add_fc(self, size, activation, verbose=False):
        """Append a fully-connected layer (no-op once the net is committed)."""
        if self.committed:
            if verbose:
                print(self.name, "is already committed. Can not add fc layer")
            return
        if verbose:
            print("adding fc layer to {0:s} with size {1:d} and activation {2:s}".format(self.name, size, ActivationType.string_of(activation)))
        self.output = tf.layers.dense(self.output, units=size,
                                      activation=activation,
                                      kernel_initializer=tf.glorot_normal_initializer(),
                                      name="L"+str(self.n_layers)+"-fc",
                                      bias_initializer=tf.random_normal_initializer())
        with tf.name_scope("L" + str(self.n_layers)):
            tf.summary.histogram("act", self.output)
        # record the layer so restore() can rebuild the same architecture
        self.net_config["Layer" + str(self.n_layers)] = {"type": "fc", "size": size, "activation": ActivationType.string_of(activation)}
        self.n_layers += 1
        self.net_config["Format"]["n_layers"] = str(self.n_layers)
        if verbose:
            print("Added fc layer to", self.name)
    def add_drop_out(self, rate, verbose=False):
        """Append a dropout layer with the given drop rate (no-op once committed)."""
        if self.committed:
            if verbose:
                print(self.name, "is already committed. Can not add drop out layer")
            return
        if verbose:
            print("adding drop out layer to {0:s} with rate {1:.2f}".format(self.name, rate))
        self.output = tf.layers.dropout(self.output, rate,
                                        name="L"+str(self.n_layers)+"-do")
        with tf.name_scope("L" + str(self.n_layers)):
            tf.summary.histogram("act", self.output)
        # record the layer so restore() can rebuild the same architecture
        self.net_config["Layer" + str(self.n_layers)] = {"type": "do", "rate": rate}
        self.n_layers += 1
        self.net_config["Format"]["n_layers"] = str(self.n_layers)
        if verbose:
            print("Added drop out layer to", self.name)
    def commit(self, verbose=False):
        """Finalize the net: output layer, squared-error loss over q-values,
        Adam optimizer, session and variable initialization. Idempotent."""
        if self.committed:
            if verbose:
                print(self.name, "is already committed. Can not commit again")
            return
        self.output = tf.layers.dense(self.output, units=self.n_classes,
                                      activation=ActivationType.RELU,
                                      kernel_initializer=tf.glorot_normal_initializer(),
                                      name="out",
                                      bias_initializer=tf.random_normal_initializer())
        # loss = mean over batch of summed squared q-value errors
        squared_error = tf.square(self.output - self.q_values_new)
        sum_squared_error = tf.reduce_sum(squared_error, axis=1)
        self.loss = tf.reduce_mean(sum_squared_error)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
        self.saver = tf.train.Saver()
        self.session = tf.Session(config=self.tf_config)
        self.session.run(tf.global_variables_initializer())
        # print("merging summaries")
        self.merged_summary = tf.summary.merge_all()
        # print("adding graph")
        # self.file_writer.add_graph(self.session.graph)
        if verbose:
            print(self.name, "was successfully committed")
        # NOTE(review): self.committed is never set to True here — the
        # commit/add guards above rely on it; TODO confirm whether this is
        # intentional.
    def close(self):
        """Close the TF session; the net cannot be used afterwards."""
        self.session.close()
        print(self.name, "was closed")
    def run(self, states):
        """Forward-pass `states` and return the q-values of the first sample."""
        return self.session.run(self.output, feed_dict={self.x: states})[0]  # todo remove [0] when more than one operation is run
    def train(self, trainining_data, batch_size, n_epochs, learning_rate=1e-3, save=True):
        """Mini-batch training on (states, q_values) pairs.

        trainining_data: tuple/list of (states, target q-values), index-aligned.
        Updates the step/time counters in net_config and optionally saves.
        """
        steps = 0
        start_time = time()
        n_batches = math.ceil(len(trainining_data[0]) / batch_size)
        for epoch in range(n_epochs):
            for i in range(n_batches):
                start = i*batch_size
                # NOTE(review): i < n_batches is always true for i in
                # range(n_batches), so the -1 branch is unreachable; slicing
                # already clamps the last (possibly partial) batch.
                end = (i+1)*batch_size if i < n_batches else -1  # the last batch may not be full size, in this case all remaining elements will be chosen
                states_batch = trainining_data[0][start:end]
                q_values_batch = trainining_data[1][start:end]
                steps += len(states_batch)
                feed_dict = {self.x: states_batch,
                             self.q_values_new: q_values_batch,
                             self.learning_rate: learning_rate}
                _, summary = self.session.run([self.optimizer, self.merged_summary], feed_dict=feed_dict)
                # print("adding summary")
                # self.file_writer.add_summary(summary, global_step=self.get_step_count() + steps)
        self.net_config["Stats"]["total_steps"] = str(int(self.net_config["Stats"]["total_steps"]) + steps)
        self.net_config["Stats"]["total_time"] = str(float(self.net_config["Stats"]["total_time"]) + time() - start_time)
        if save:
            self.save()
    def save(self):
        """Write net.cfg and a TF checkpoint to save_path/name/."""
        if not os.path.isdir(self.save_path + self.name):
            os.makedirs(self.save_path + self.name)
        with open(self.save_path + self.name + "/net.cfg", "w") as cfg_file:
            self.net_config.write(cfg_file)
        save_path = self.saver.save(self.session, self.save_path + self.name + "/" + self.name)
        print("saved net to:", save_path)
    def load(self, ckp_file):
        """Restore TF variables from a checkpoint file into this session."""
        self.saver.restore(self.session, save_path=ckp_file)
    @staticmethod
    def restore(name, path=DEFAULT_SAVE_PATH, new_name=None, verbose=False):
        """
        restores a net from the file system
        :param name: the name of the saved net
        :param path: the directory the files associated with the saved net are stored
        :param new_name: optional, gives the net a new name
        :param verbose: en-/disables additional console output
        :return: the restored net object
        """
        if verbose:
            print("restoring {0:s} from {1:s}".format(name, path))
            print("config file:", path + name + "/net.cfg")
            print("tf checkpoint:", path + name + "/" + name + ".ckpt")
        config = ConfigParser()
        config.read(path + name + "/net.cfg")
        name_ = (name if new_name is None else new_name)
        input_shape = [int(s) for s in config["Format"]["input_shape"].split(",")]
        n_classes = int(config["Format"]["n_classes"])
        net = NeuralNetwork(name_, input_shape, n_classes, save_path=path)
        n_layers = int(config["Format"]["n_layers"])
        if verbose:
            print("N_Layers:", n_layers)
        # rebuild the recorded layer stack in order
        for i in range(n_layers):
            l_type = config["Layer" + str(i)]["type"]
            if l_type == "fc":
                size = int(config["Layer" + str(i)]["size"])
                activation = ActivationType.get(config["Layer" + str(i)]["activation"])
                net.add_fc(size, activation, verbose=verbose)
            elif l_type == "do":
                rate = float(config["Layer" + str(i)]["rate"])
                net.add_drop_out(rate, verbose=verbose)
        # carry over accumulated training statistics
        for key in config["Stats"]:
            net.net_config["Stats"][key] = config["Stats"][key]
        net.commit()
        net.load(path + name + "/" + name)  # + ".ckpt")
        return net
    def get_step_count(self):
        """Total number of training samples this net has been fed so far."""
        return int(self.net_config["Stats"]["total_steps"])
class ActivationType(Enum):
    """Enumeration of supported activation functions, serializable by name.

    NOTE(review): plain functions assigned as Enum attributes are treated as
    methods rather than members by the `enum` machinery; whether RELU/SIGMOID
    behave as true members here depends on how TF exposes these callables —
    TODO confirm.
    """
    RELU = tf.nn.relu
    SIGMOID = tf.nn.sigmoid
    @staticmethod
    def get(act_type):
        # Inverse of string_of(); falls through (returns None) for unknown names.
        if act_type == "RELU":
            return ActivationType.RELU
        if act_type == "SIGMOID":
            return ActivationType.SIGMOID
    @staticmethod
    def string_of(act_type):
        # Serialize an activation to its config-file name; None if unknown.
        if act_type == ActivationType.RELU:
            return "RELU"
        if act_type == ActivationType.SIGMOID:
            return "SIGMOID"
class ReplayMemory:
    def __init__(self, n_actions, discount_factor=0.97):
        """Create an empty replay memory.

        n_actions: number of possible actions (length of each q-value entry).
        discount_factor: gamma used when propagating rewards backwards in
            update_q_values().
        """
        self.n_actions = n_actions
        self.discount_factor = discount_factor
        self.size = 0  # number of stored transitions
        # parallel lists; index i describes one transition
        self.states = []
        self.q_values = []  # each entry is a list of n_actions float values
        self.actions = []
        self.rewards = []
        self.predicted_q_values = []  # snapshot taken by update_q_values()
        self.estimation_errors = []  # |updated - predicted| per transition
def add(self, state, q_values, action, reward):
"""
adds a new entry to memory
:param state: the state of the environment
:param q_values: the predicted q-values for every possible action in this state
:param action: the action that was chosen
:param reward: the reward received for the transition from state to next state through performing action
:return:
"""
self.states.append(state)
self.q_values.append(q_values)
self.actions.append(action)
self.rewards.append(reward)
self.size += 1
    def update_q_values(self, sarsa=False):
        """
        calculates the actual q-values from the predicted ones. during play only the predicted values are stored
        as the real ones are not known (they depend on future states). ideally this method is called at the end of
        an episode -> no future states exist/have any influence on the value of current or past states.
        :param sarsa: whether the sarsa (state action reward state action) variant of q-value updates should be used
            the sarsa-variant uses the q-value of the selected next action instead of the highest q-value next action
        :return: elapsed wall-clock seconds, or -1 if the memory is empty
        """
        if self.size < 1:
            return -1
        # save a copy of the predicted values for analysis
        self.predicted_q_values = self.q_values[:]
        start_time = time()
        self.estimation_errors = np.zeros(shape=[self.size])
        # the q-value of the last step should be the reward in that step
        self.q_values[-1][self.actions[-1]] = self.rewards[-1]
        # the update moves from the present to the past -> from the back to the front of the array
        for i in reversed(range(self.size-1)):
            action = self.actions[i]
            reward = self.rewards[i]
            # the q-value for the action is composed of the immediate reward + discounted future reward
            if sarsa:
                following_action = self.actions[i + 1]
                action_value = reward + self.discount_factor * self.q_values[i + 1][following_action]
            else:
                action_value = reward + self.discount_factor * np.max(self.q_values[i+1])
            # track how far the network's prediction was off for this step
            self.estimation_errors[i] = abs(action_value - self.q_values[i][action])
            # only the q-value for the selected action can be updated
            self.q_values[i][action] = action_value
        end_time = time()
        print("Average estimation error:", Stat(self.estimation_errors))
        return end_time - start_time
def get_random_batch(self, batch_size, duplicates=False):
"""
get a batch of randomly selected state-q_value-pairs
:param duplicates: whether duplicates are allowed
:param batch_size: the number of state-q_value-pairs returned; if not enough entries are available
and not duplicates are allowed less pairs are returned
:return: a list of states and a list of q-values. corresponding entries have the same index
"""
if self.size <= 0:
return None
# if the batch size is greater than the size of the memory the entire memory is returned
if batch_size > self.size-1 and not duplicates:
return self.states, self.q_values
selection = np.random.choice([i for i in range(self.size)], size=batch_size, replace=duplicates)
states_batch = np.array(self.states)[selection]
q_values_batch = np.array(self.q_values)[selection]
return states_batch, q_values_batch
def get_training_set(self, shuffle=False):
"""
:param shuffle: whether the set will be shuffled
:return: a set of training data containing the entire contents of this replay memory
"""
if shuffle:
ordering = [i for i in range(self.size)]
np.random.shuffle(ordering)
states = [self.states[i] for i in ordering]
q_values = [self.q_values[i] for i in ordering]
else:
states = self.states
q_values = self.q_values
return states, q_values
def clear(self):
"""
deletes all values from memory. only the number of actions and the discount factor are conserved
:return:
"""
self.size = 0
self.states = []
self.q_values = []
self.actions = []
self.rewards = []
self.estimation_errors = []
def write(self):
"""
saves the contents of this replay memory to the file system
:return:
"""
np.savetxt(TEMP_DIR + "estimation_errors.csv", self.estimation_errors, delimiter=",")
# np.savetxt(TEMP_DIR + "rewards.csv", self.rewards, delimiter=",") # replaced with lines 187-189
np.savetxt(TEMP_DIR + "q_values.csv", self.q_values, delimiter=",")
| np.savetxt(TEMP_DIR + "pred_q_values.csv", self.predicted_q_values, delimiter=",") | numpy.savetxt |
from __future__ import absolute_import, division
# External modules
import logging, os, sys
import numpy as np
from astropy.table import Table, Column
from astropy.cosmology import WMAP9 as cosmo
from astropy import units as u
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
# Local modules
from .convert_units import (DivideInterval, RadiiUnknown2Arcsec, RadiiUnknown2Parsec, RescaleArray)
from ..stellar_module import StarGenerator
from ..stellar_module import StarGenerator
from ..utilities import GetStipsData
from ..utilities import OffsetPosition
from ..utilities import SelectParameter
from ..utilities import StipsDataTable
#-----------
class SceneModule(object):
#-----------
    def __init__(self, **kwargs):
        """
        Noiseless scene generator module.

        :Author: <NAME>

        :Organization: Space Telescope Science Institute

        :History:
            * 2010/10/19 PLL created this module.
            * 2011/06/14 PLL added single star simulation.
            * 2011/06/28 PLL reorganized functions.
            * 2011/10/28 PLL added galaxies simulation.
            * 2014/02/14 BY modified the code to be instrument-independent

        Examples
        --------
        >>> from stips import SceneModule

        Parameters
        ----------
        self: obj
            Class instance.

        **kwargs: dictionary
            Additional arguments needed to make the scene
        """
        # Output location, file-name prefix and catalogue format (resolved
        # against configuration defaults by SelectParameter).
        self.out_path = SelectParameter('out_path', kwargs)
        self.prefix = kwargs.get('out_prefix', 'sim')
        self.cat_type = SelectParameter('cat_type', kwargs)
        # Use the caller-supplied logger if given, otherwise configure the
        # shared STIPS logger (attach a stderr handler only once).
        if 'logger' in kwargs:
            self.logger = kwargs['logger']
        else:
            self.logger = logging.getLogger('__stips__')
            log_level = SelectParameter('log_level', kwargs)
            print("Log level: {}".format(log_level))
            self.logger.setLevel(getattr(logging, log_level))
            if not len(self.logger.handlers):
                stream_handler = logging.StreamHandler(sys.stderr)
                stream_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))# [in %(pathname)s:%(lineno)d]'))
                self.logger.addHandler(stream_handler)
        # Scene centre and RNG seed may come nested under 'scene_general'
        # or flat in kwargs.
        if 'scene_general' in kwargs:
            self.ra = kwargs['scene_general'].get('ra', 0.0)
            self.dec = kwargs['scene_general'].get('dec', 0.0)
            self.seed = SelectParameter('seed', kwargs['scene_general'])
        else:
            self.ra = kwargs.get('ra', 0.0)
            self.dec = kwargs.get('dec', 0.0)
            self.seed = SelectParameter('seed', kwargs)
        # Human-readable record of the scene parameters (written to output).
        self.params = [ 'Random seed: {}'.format(self.seed) ]
        msg = 'Centre (RA,DEC) = ({:.3f},{:.3f})'
        self.params += [ msg.format(self.ra, self.dec) ]
        self.catalogues = {}
#-----------
def CreatePopulation(self, pop, id=0):
"""
Generate a stellar population.
Output list will have these columns:
# ID
# RA
# DEC
# Distance
# Age
# Metallicity
# Mass
# Count Rate (in the chosen instrument/filter), Absolute
# Count Rate (in the chosen instrument/filter), Observed
Parameters
----------
self: obj
Class instance.
pop: dictionary
Information about the population. Includes:
n_stars: int
Number of stars
age_low,age_high: floating point
Minimum and maximum ages.
z_low,z_high: floating point
Minimum and maximum metallicities.
imf: string
Initial Mass Function for the population
alpha: float
Exponent (if imf = 'powerlaw')
binary_fraction: float
Binary Fraction
distribution: string
Stellar distribution in the sky (e.g. power law, inverse power law, uniform, etc.)
clustered: bool
Cluster higher masses to centre?
radius: float
Radius in (units)
radius_units: string
Units of radius (above)
distance_low,distance_high: floating point
Minimum and maximum distance (in kpc) of the 'cluster'
offset_ra,offset_dec: floating point
Offset of the cluster from the scene centre in arcseconds
Returns
-------
outList: string
The catalogue file produced
"""
star_chunk = 100000
age_bins = DivideInterval("1.0e6,1.35e10,d5")
met_bins = DivideInterval("-2.5,0.5,i0.1")
out_file = "{}_stars_{:03d}.{}".format(self.prefix, id, self.cat_type)
outList = os.path.join(self.out_path, out_file)
if os.path.isfile(outList):
os.remove(outList) # No append
data_table = StipsDataTable.dataTableFromFile(outList)
self._log("info","Creating catalogue {}".format(outList))
n_stars = int(pop['n_stars'])
age_l = age_bins[(np.abs(age_bins-float(pop['age_low']))).argmin()]
age_h = age_bins[(np.abs(age_bins-float(pop['age_high']))).argmin()]
age_bins = age_bins[np.where((age_bins>=age_l) & (age_bins<=age_h))]
met_l = met_bins[(np.abs(met_bins-float(pop['z_low']))).argmin()]
met_h = met_bins[(np.abs(met_bins-float(pop['z_high']))).argmin()]
met_bins = met_bins[np.where((met_bins>=met_l) & (met_bins<=met_h))]
imf = pop['imf']
alpha = abs(float(pop['alpha']))
distribution = pop['distribution']
clustered = pop['clustered']
radius = float(pop['radius'])
rad_units = pop['radius_units']
dist_l = float(pop['distance_low']) * 1.e3 #convert kpc to pc
dist_h = float(pop['distance_high']) * 1.e3 #convert kpc to pc
binary_fraction = float(pop['binary_fraction'])
offset_ra = float(pop['offset_ra'])/3600. #offset in RA arcseconds, convert to degrees.
offset_dec = float(pop['offset_dec'])/3600. #offset in DEC arcseconds, convert to degrees.
metadata = {'type': 'phoenix', 'id': id, 'n_stars': n_stars, 'age_l': age_l, 'age_h': age_h,
'met_l': met_l, 'met_h': met_h, 'imf': imf, 'alpha': alpha,
'distribution': distribution, 'clustered': clustered, 'radius': radius,
'radius_units': rad_units, 'dist_l': dist_l, 'dist_h': dist_h,
'offset_ra': offset_ra, 'offset_dec': offset_dec,
'name': 'Phoenix Stellar Population Table', 'bandpass': 'johnson,i'}
data_table.meta = metadata
self._log("info","Creating age and metallicity numbers")
# ages = np.random.RandomState(seed=self.seed).random_sample(size=len(age_bins))
ages = np.random.random_sample(size=len(age_bins))
ages /= ages.sum()
# mets = np.random.RandomState(seed=self.seed).random_sample(size=len(met_bins))
mets = np.random.random_sample(size=len(met_bins))
mets /= mets.sum()
self._log("info","Created age and metallicity numbers")
self._log("info","Creating stars")
#Generate star masses
datasets = 0
total = 0
for i, age in enumerate(age_bins):
self.logger.info("Age %g",age)
n_age = int(round(n_stars * ages[i]))
for j, met in enumerate(met_bins):
self.logger.info("Metallicity %f",met)
num_stars = int(round(n_age * mets[j]))
if num_stars == 0:
continue
self.logger.info("Creating %d stars",num_stars)
stargen = StarGenerator(age, met, imf=imf, alpha=alpha, seed=self.seed, logger=self.logger)
all_masses, all_rates, all_temps, all_gravs = stargen.make_cluster(num_stars)
all_x, all_y, all_z = self._MakeCoords(num_stars, radius, func=distribution, scale=2.8, do_z=True)
# all_distances = np.random.RandomState(seed=self.seed).uniform(low=dist_l, high=dist_h, size=num_stars)
all_distances = np.random.uniform(low=dist_l, high=dist_h, size=num_stars)
if clustered:
all_x, all_y, all_z = self._CenterObjByMass(all_x, all_y, all_masses, z=all_z)
# all_binaries = np.random.RandomState(seed=self.seed).binomial(1,binary_fraction,len(all_masses))
all_binaries = np.random.binomial(1,binary_fraction,len(all_masses))
idx = np.where(all_binaries==1)[0]
mb, rb, tb, gb = stargen.make_cluster(len(idx))
xb, yb, zb = all_x[idx], all_y[idx], all_z[idx]
db = all_distances[idx]
all_masses = np.insert(all_masses, idx, mb)
all_rates = np.insert(all_rates, idx, rb)
all_temps = np.insert(all_temps, idx, tb)
all_gravs = np.insert(all_gravs, idx, gb)
all_x = np.insert(all_x, idx, xb)
all_y = np.insert(all_y, idx, yb)
all_z = np.insert(all_z, idx, zb)
all_distances = np.insert(all_distances, idx, db)
all_binaries = np.insert(all_binaries, idx+1, 0)
num_stars += len(idx)
cached_ra = 0.
cached_dec = 0.
cached_distance = 0.
cached = False
for k in range(num_stars // star_chunk + 1):
xl, xh = k * star_chunk, min(k * star_chunk + star_chunk, num_stars-1)
star_set = xh - xl
self._log("info", "Chunk {}: {} stars".format(k+1, star_set))
masses = all_masses[xl:xh]
rates = all_rates[xl:xh]
temps = all_temps[xl:xh]
gravs = all_gravs[xl:xh]
x, y, z = all_x[xl:xh], all_y[xl:xh], all_z[xl:xh]
distances = all_distances[xl:xh]
binaries = all_binaries[xl:xh]
ids = np.arange(total + xl, total + xh) + 1
x = RadiiUnknown2Arcsec(x, rad_units, distances)
y = RadiiUnknown2Arcsec(y ,rad_units, distances)
z = RadiiUnknown2Parsec(z, rad_units, distances)
distances += z
ras = x/3600. #decimal degrees
decs = y/3600. #decimal degrees
base_ra,base_dec = OffsetPosition(self.ra,self.dec,offset_ra,offset_dec)
decs += base_dec
idxg = np.where(decs>90.)
idxl = np.where(decs<-90.)
decs[idxg] = 180. - decs[idxg]
ras[idxg] = 180. + ras[idxg]
decs[idxl] = -180. - decs[idxl]
ras[idxl] = 180. + ras[idxl]
ras = (ras + base_ra)%360
apparent_rates = rates + (5.0 * np.log10(distances) - 5.0)
t = Table()
t['id'] = Column(data=ids, format="%8d")
t['ra'] = Column(data=ras, unit=u.deg, format="%17.9e")
t['dec'] = Column(data=decs, unit=u.deg, format="%17.9e")
t['distance'] = Column(data=distances, unit='pc', format="%17.9e")
t['age'] = Column(data=np.full_like(ids, age), unit=u.yr, format="%17d")
t['metallicity'] = Column(data=np.full_like(ras, met), format="%4.1f")
t['mass'] = Column(data=masses,unit=u.Msun, format="%17.9e")
t['teff'] = Column(data=temps, unit='K', format="%13.8f")
t['log_g'] = Column(data=gravs, format="%12.9f")
t['binary'] = Column(data=binaries, format="%3d")
t['dataset'] = Column(data=np.full_like(ids, datasets), format="%6d")
t['absolute'] = Column(data=rates,unit=u.mag, format="%14.6e")
t['apparent'] = Column(data=apparent_rates,unit=u.mag, format="%12.4e")
data_table.write_chunk(t)
del t
datasets += 1
total += num_stars
self._log("info","Done creating catalogue")
return outList
#-----------
def CreateGalaxies(self, gals, id=0):
"""
Generate galaxies list.
Output list will have these columns:
# ID
# RA
# DEC
# Redshift
# Model
# Age
# Profile
# Half-flux_radius
# Axial_ratio
# Position_angle
# Johnson,V absolute
# Johnson,V apparent
Parameters
----------
self: obj
Class instance.
gals: dictionary
Information about the galaxies. Includes:
n_gals: int
Number of galaxies
z_low,z_high: float
Minimum and maximum redshifts (converted to distances?).
rad_low,rad_high: float
Minimum and maximum galactic half-light radii (in arcseconds)
sb_v_low, sb_v_high: float
Minimum and maximum V-band average surface brightness within rad
distribution: string
Stellar distribution in the sky (e.g. power law, inverse power law, uniform, etc.)
clustered: bool
Cluster higher masses to centre?
radius: float
Radius in (units)
radius_units: string
Units of radius (above)
offset_ra,offset_dec: float
Offset of cluster from scene centre in mas
Returns
-------
outList: string
The catalogue file produced
"""
bc95_models = np.array(('a','b','c','d','e','f'))
bc95_ages = np.array(("10E5","25E5","50E5","76E5","10E6","25E6","50E6","10E7","50E7","10E8","50E8","10E9"))
out_file = "{}_gals_{:03d}.{}".format(self.prefix, id, self.cat_type)
outList = os.path.join(self.out_path, out_file)
if os.path.isfile(outList):
os.remove(outList) # No append
data_table = StipsDataTable.dataTableFromFile(outList)
# Write star list (overwrite)
self.logger.info("Creating catalogue %s",outList)
# Generate galaxy list
n_gals = int(gals['n_gals'])
z_l = float(gals['z_low'])
z_h = float(gals['z_high'])
r_l = float(gals['rad_low'])
r_h = float(gals['rad_high'])
m_l = float(gals['sb_v_low'])
m_h = float(gals['sb_v_high'])
distribution = gals['distribution']
clustered = gals['clustered']
radius = float(gals['radius'])
rad_units = gals['radius_units']
offset_ra = float(gals['offset_ra'])/3600. #offset in RA arcseconds, convert to degrees.
offset_dec = float(gals['offset_dec'])/3600. #offset in DEC arcseconds, convert to degrees.
self._log("info","Wrote preamble")
self._log("info","Parameters are: {}".format(gals))
ids = np.arange(n_gals)
# Roughly 50% spiral, 50% elliptical
ellipRatio = 0.5
# binoDist = np.random.RandomState(seed=self.seed).binomial(1, ellipRatio, n_gals)
binoDist = np.random.binomial(1, ellipRatio, n_gals)
idx_ellip = np.where(binoDist == 1)
idx_spiral = np.where(binoDist != 1)
types = np.array( ['expdisk'] * n_gals )
types[idx_ellip] = 'devauc'
n_ellip = len( idx_ellip[0] )
n_spiral = n_gals - n_ellip
# Axial ratio
# Spiral = 0.1 to 1
# Elliptical = 0.5 to 1
axialRatioSpiralMin, axialRatioSpiralMax = 0.1, 1.0
axialRatioEllipMin, axialRatioEllipMax = 0.5, 1.0
axials = np.zeros(n_gals)
# axials[idx_spiral] = np.random.RandomState(seed=self.seed).uniform(axialRatioSpiralMin, axialRatioSpiralMax, n_spiral)
# axials[idx_ellip] = np.random.RandomState(seed=self.seed).uniform(axialRatioEllipMin, axialRatioEllipMax, n_ellip)
axials[idx_spiral] = np.random.uniform(axialRatioSpiralMin, axialRatioSpiralMax, n_spiral)
axials[idx_ellip] = np.random.uniform(axialRatioEllipMin, axialRatioEllipMax, n_ellip)
# Position angle
posAngleAlgo = 'uniform'
# angles = np.random.RandomState(seed=self.seed).uniform(0.0, 359.9, n_gals)
angles = np.random.uniform(0.0, 359.9, n_gals)
# Half-flux radius - uniform
# rads = np.random.RandomState(seed=self.seed).uniform(r_l, r_h, n_gals)
rads = np.random.uniform(r_l, r_h, n_gals)
# Redshifts
# If both z_low and z_high are zero, do local galaxies. Distance is 0.5 Mpc -- 50 Mpc.
# In the future, offer an option for straight distance or redshift.
if z_l == 0. and z_h == 0.:
z_label = "distance"
# distances = np.random.RandomState(seed=self.seed).uniform(5.e5, 5.e7, n_gals)
distances = np.random.uniform(5.e5, 5.e7, n_gals)
zs = distances / 1.e3
convs = np.log10(distances)
else:
z_label = "redshift"
# zs = np.random.RandomState(seed=self.seed).uniform(z_l, z_h, n_gals)
zs = np.random.uniform(z_l, z_h, n_gals)
distances = np.array(cosmo.comoving_distance(zs).to(u.pc))
convs = np.log10(np.array(cosmo.luminosity_distance(zs).to(u.pc)))
# Luminosity function - power law
lumPow = -1.8
# vmags = np.random.RandomState(seed=self.seed).power(np.abs(lumPow)+1.0, size=n_gals)
vmags = np.random.power(np.abs(lumPow)+1.0, size=n_gals)
if lumPow < 0: vmags = 1.0 - vmags
vmags = RescaleArray(vmags, m_l, m_h)
vmags_abs = vmags - 5*(convs-1.)
# models = np.random.RandomState(seed=self.seed).choice(bc95_models,size=n_gals)
# ages = np.random.RandomState(seed=self.seed).choice(bc95_ages,size=n_gals)
models = np.random.choice(bc95_models,size=n_gals)
ages = np.random.choice(bc95_ages,size=n_gals)
self._log("info","Making Co-ordinates")
x,y = self._MakeCoords(n_gals,radius,func=distribution,scale=2.8)
x = RadiiUnknown2Arcsec(x,rad_units,distances)
y = RadiiUnknown2Arcsec(y,rad_units,distances)
if clustered:
self._log("info","Clustering")
x,y = self._CenterObjByMass(x,y,1/vmags)
self._log("info","Converting Co-ordinates into RA,DEC")
ras = x/3600. #decimal degrees
decs = y/3600. #decimal degrees
base_ra,base_dec = OffsetPosition(self.ra,self.dec,offset_ra,offset_dec)
decs += base_dec
idxg = np.where(decs>90.)
idxl = np.where(decs<-90.)
decs[idxg] = 180. - decs[idxg]
ras[idxg] = 180. + ras[idxg]
decs[idxl] = -180. - decs[idxl]
ras[idxl] = 180. + ras[idxl]
ras = (ras + base_ra)%360
metadata = {'type': 'bc95', 'id': id, 'n_gals': n_gals, 'z_l': z_l, 'z_h': z_h,
'radius_l': r_l, 'radius_h': r_h, 'sb_v_l': m_l, 'sb_v_h': m_h,
'distribution': distribution, 'clustered': clustered, 'radius': radius,
'radius_units': rad_units, 'offset_ra': offset_ra, 'offset_dec': offset_dec,
'name': 'Galaxy Population Table', 'bandpass': 'johnson,v'}
data_table.meta = metadata
t = Table()
t['id'] = Column(data=ids)
t['ra'] = Column(data=ras,unit=u.deg)
t['dec'] = Column(data=decs,unit=u.deg)
t[z_label] = Column(data=zs)
t['model'] = Column(data=models)
t['age'] = Column(data=ages,unit=u.yr)
t['profile'] = Column(data=types)
t['radius'] = Column(data=rads)
t['axial_ratio'] = Column(data=axials,unit=u.deg)
t['pa'] = Column(data=angles,unit=u.deg)
t['absolute_surface_brightness'] = Column(data=vmags_abs,unit=u.mag)
t['apparent_surface_brightness'] = Column(data=vmags,unit=u.mag)
data_table.write_chunk(t)
self._log("info","Done creating catalogue")
return outList
#-----------
def _CenterObjByMass(self, x, y, mass, z=None):
"""
Place slightly more massive stars near image center
to simulate mass segragation.
Parameters
----------
self: obj
Class instance.
x, y: array_like
Initial coordinates of object placement.
mass: array_like
Stellar masses.
z: array_like, optional
Initial z co-ordinates of object placement. If provided, return 3D co-ordinates.
Returns
-------
new_x, new_y, [new_z]: array_like
Re-ordered `x` and `y`.
"""
x_cen = 0.
y_cen = 0.
z_cen = 0.
n_stars = x.size
# Central coordinates will have smallest values
dx = x - x_cen
dy = y - y_cen
if z is not None:
dz = z - z_cen
d = np.sqrt(dy*dy + dx*dx + dz*dz)
else:
d = np.sqrt(dy*dy + dx*dx)
i_sorted = np.argsort(d)
# Segregate mass
m_cut = mass.max() * 0.75 # Arbitrary cut of low/high masses
j_lo = np.where( mass <= m_cut )[0]
j_hi = np.where( mass > m_cut )[0]
# Place high masses with slightly more preference for center
j_sorted = | np.zeros(n_stars, dtype='int') | numpy.zeros |
#!/usr/bin/env python
# encoding: utf-8
# General methods.
#
# https://github.com/LucaZampierin/ABABSE
#
# Adapted from Trusca, Wassenberg, Frasincar and Dekker (2020). Changes have been made to adapt the methods
# to the current project and to adapt the scripts to TensorFlow 2.5.
# https://github.com/mtrusca/HAABSA_PLUS_PLUS
#
# <NAME>., <NAME>., <NAME>., <NAME>. (2020). A Hybrid Approach for aspect-based sentiment analysis using
# deep contextual word embeddings and hierarchical attention. 20th International Conference on Web Engineering (ICWE 2020)
# (Vol.12128, pp. 365–380). Springer
import numpy as np
import random
import string
def batch_index(length, batch_size, neg_samples, n_iter=100, is_shuffle=True):
"""
Method adapted from Trusca et al. (2020). Select indeces of the observations to be used in a batch
:param length: number of total observations
:param batch_size: number of observations in a batch
:param neg_samples: number of negative samples used for each observation
:param n_iter:
:param is_shuffle: shuffle or not the data, defaluts to True.
:return:
"""
index = list(range(length))
for j in range(n_iter):
if is_shuffle:
np.random.shuffle(index)
for i in range(int(length / batch_size) + (1 if length % batch_size else 0)):
# yield index[i * batch_size:(i + 1) * batch_size]
batch_index = index[i * batch_size:(i + 1) * batch_size]
if i == 0:
neg_batch_index = index[(i + 1) * batch_size:len(index)-1]
elif i == int(length / batch_size) + (1 if length % batch_size else 0)-1:
neg_batch_index = index[0 : i * batch_size]
else:
neg_batch_index = index[0:i * batch_size] + index[(i+1) * batch_size: len(index)-1]
neg_batch_index = random.sample(neg_batch_index, len(batch_index)*neg_samples)
yield batch_index, neg_batch_index
def load_word_id_mapping(word_id_file, encoding='utf8'):
"""
Method obtained from Trusca et al. (2020). Loads the word-to-id mapping
:param word_id_file: word-id mapping file path
:param encoding: file's encoding, for changing to unicode
:return: word-id mapping, like hello=5
"""
word_to_id = dict()
for line in open(word_id_file):
line = line.decode(encoding, 'ignore').lower().split()
word_to_id[line[0]] = int(line[1])
print('\nload word-id mapping done!\n')
return word_to_id
def index_to_word(w2v_file, index):
"""
:param w2v_file:
:param index:
:return:
"""
fp = open(w2v_file)
cnt = 0
for line in fp:
line = line.split()
cnt+=1
if cnt==index:
return line[0]
def load_w2v(w2v_file, embedding_dim, is_skip=False):
"""
Method obtained from Trusca et al. (2020). Loads the embedding matrix.
:param w2v_file: embedding path
:param embedding_dim: embedding dimensions
:param is_skip:
:return:
"""
fp = open(w2v_file)
if is_skip:
fp.readline()
w2v = []
word_dict = dict()
# [0,0,...,0] represent absent words
w2v.append([0.] * embedding_dim)
cnt = 0
for line in fp:
cnt += 1
line = line.split()
# line = line.split()
if len(line) != embedding_dim + 1:
print('a bad word embedding: {}'.format(line[0]))
continue
w2v.append([float(v) for v in line[1:]])
word_dict[line[0]] = cnt
w2v = np.asarray(w2v, dtype=np.float32)
w2v_sum = np.sum(w2v, axis=0, dtype=np.float32)
div = np.divide(w2v_sum, cnt, dtype=np.float32)
w2v = np.row_stack((w2v, div))
# w2v = np.row_stack((w2v, np.sum(w2v, axis=0) / cnt))
print(np.shape(w2v))
word_dict['$t$'] = (cnt + 1)
# w2v -= np.mean(w2v, axis=0)
# w2v /= np.std(w2v, axis=0)
print(word_dict['$t$'], len(w2v))
return word_dict, w2v
def load_word_embedding(word_id_file, w2v_file, embedding_dim, is_skip=False):
"""
Method obtained from Trusca et al. (2020). Loads the word embeddings.
:param word_id_file:
:param w2v_file:
:param embedding_dim:
:param is_skip:
:return:
"""
word_to_id = load_word_id_mapping(word_id_file)
word_dict, w2v = load_w2v(w2v_file, embedding_dim, is_skip)
cnt = len(w2v)
for k in word_to_id.keys():
if k not in word_dict:
word_dict[k] = cnt
w2v = np.row_stack((w2v, np.random.uniform(-0.01, 0.01, (embedding_dim,))))
cnt += 1
print(len(word_dict), len(w2v))
return word_dict, w2v
def load_aspect2id(input_file, word_id_mapping, w2v, embedding_dim):
"""
Method obtained from Trusca et al. (2020).
:param input_file:
:param word_id_mapping:
:param w2v:
:param embedding_dim:
:return:
"""
aspect2id = dict()
a2v = list()
a2v.append([0.] * embedding_dim)
cnt = 0
for line in open(input_file):
line = line.lower().split()
cnt += 1
aspect2id[' '.join(line[:-1])] = cnt
tmp = []
for word in line:
if word in word_id_mapping:
tmp.append(w2v[word_id_mapping[word]])
if tmp:
a2v.append(np.sum(tmp, axis=0) / len(tmp))
else:
a2v.append(np.random.uniform(-0.01, 0.01, (embedding_dim,)))
print(len(aspect2id), len(a2v))
return aspect2id, np.asarray(a2v, dtype=np.float32)
def change_to_onehot(y, pos_neu_neg=True):
"""
Method adapted from Trusca et al. (2020). One-hot-encoding of sentiment and aspect categories
:param y: vector to one-hot-encode
:param pos_neu_neg: True if senitment, false if aspect category. (defaults to True)
:return:
"""
from collections import Counter
print(Counter(y))
if pos_neu_neg:
class_set = ['1', '0', '-1']
else:
class_set = ['1','2','3','4','5','6','7','8','9','10','11','12']
n_class = len(class_set)
y_onehot_mapping = dict(zip(class_set, range(n_class)))
# print('THIS IS THE DICTIONARY')
# print(y_onehot_mapping)
onehot = []
for label in y:
tmp = [0] * n_class
tmp[y_onehot_mapping[label]] = 1
onehot.append(tmp)
return np.asarray(onehot, dtype=np.int32)
def load_inputs_twitter(input_file, word_id_file, sentence_len, type_='', is_r=True, target_len=10, encoding='utf8'):
"""
Method adapted from Trusca et al. (2020). Loads data matrices.
:param input_file: data path
:param word_id_file: word-to-id mapping
:param sentence_len: maximum sentence lenght
:param type_: different types of data loading
:param is_r: boolean reverse the sentence. defaults to True
:param target_len: maximum target lenght
:param encoding: defaults to 'utf8'
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('load word-to-id done!')
x, y, aspect, sen_len = [], [], [], []
x_neg, sen_len_neg = [], []
x_r, sen_len_r = [], []
target_words = []
tar_len = []
# target_neg = []
all_target, all_sent, all_y, all_aspect = [], [], [], []
# read in txt file
lines = open(input_file).readlines()
for i in range(0, len(lines), 4):
# targets
words = lines[i + 1].lower().split()
target = words
target_word = []
for w in words:
if w in word_to_id:
target_word.append(word_to_id[w])
l = min(len(target_word), target_len)
tar_len.append(l)
# target_neg.append(target_word[:l])
target_words.append(target_word[:l] + [0] * (target_len - l))
# sentiment
y.append(lines[i + 2].strip().split()[0])
# aspect
aspect.append(lines[i + 3].strip().split()[0])
# left and right context
# words = lines[i].lower()
# words.translate(str.maketrans('', '', string.punctuation))
# words = words.split()
words = lines[i].lower().split()
sent = words
words_l, words_r = [], []
flag = True
for word in words:
if word == '$t$':
flag = False
continue
if flag:
# if word == '.' or word == ',' or word == ';' or word == ':' or word == '!' or word == '?':
# continue
if word in word_to_id:
words_l.append(word_to_id[word])
else:
# if word == '.' or word == ',' or word == ';' or word == ':' or word == '!' or word == '?':
# continue
if word in word_to_id:
words_r.append(word_to_id[word])
if type_ == 'TD' or type_ == 'TC' or type_ == 'LCR':
# words_l.extend(target_word)
words_neg = words_l + target_word + words_r
words_l = words_l[:sentence_len]
words_r = words_r[:sentence_len]
words_neg = words_neg[:sentence_len]
x_neg.append(words_neg + [0] * (sentence_len - len(words_neg)))
sen_len_neg.append(len(words_neg))
sen_len.append(len(words_l))
x.append(words_l + [0] * (sentence_len - len(words_l)))
# tmp = target_word + words_r
tmp = words_r
if is_r:
tmp.reverse()
sen_len_r.append(len(tmp))
x_r.append(tmp + [0] * (sentence_len - len(tmp)))
all_sent.append(sent)
all_target.append(target)
else:
words = words_l + words_r
words = words[:sentence_len]
sen_len.append(len(words))
x.append(words + [0] * (sentence_len - len(words)))
words_neg = words_l + target_word + words_r
words_neg = words_neg[:sentence_len]
sen_len_neg.append(len(words_neg))
x_neg.append(words_neg + [0] * (sentence_len - len(words_neg)))
all_sent.append(sent)
all_target.append(target)
all_y = y;
y = change_to_onehot(y, pos_neu_neg=True)
aspect = change_to_onehot(aspect, pos_neu_neg=False)
if type_ == 'TD':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), \
np.asarray(sen_len_r), np.asarray(y)
elif type_ == 'TC':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), np.asarray(sen_len_r), \
np.asarray(y), np.asarray(target_words), np.asarray(tar_len), np.asarray(all_sent), np.asarray(
all_target), np.asarray(all_y)
elif type_ == 'IAN':
return np.asarray(x), np.asarray(sen_len), np.asarray(target_words), \
np.asarray(tar_len), np.asarray(y), np.asarray(aspect), np.asarray(all_y), np.asarray(x_neg), np.asarray(sen_len_neg)
elif type_ == 'LCR':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), np.asarray(sen_len_r), np.asarray(target_words), \
np.asarray(tar_len), np.asarray(y), np.asarray(aspect), np.asarray(all_y), np.asarray(x_neg), np.asarray(sen_len_neg)
else:
return np.asarray(x), np.asarray(sen_len), np.asarray(y)
def load_inputs_twitter_(input_file, word_id_file, sentence_len, type_='', is_r=True, target_len=10, encoding='utf8'):
"""
Method obtained from Trusca et al. (2020). NOTE: not used in this project
:param input_file:
:param word_id_file:
:param sentence_len:
:param type_:
:param is_r:
:param target_len:
:param encoding:
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('load word-to-id done!')
x, y, sen_len = [], [], []
x_r, sen_len_r = [], []
target_words = []
tar_len = []
lines = open(input_file).readlines()
for i in range(0, len(lines), 3):
words = lines[i + 1].decode(encoding).lower().split()
# target_word = map(lambda w: word_to_id.get(w, 0), target_word)
# target_words.append([target_word[0]])
target_word = []
for w in words:
if w in word_to_id:
target_word.append(word_to_id[w])
l = min(len(target_word), target_len)
tar_len.append(l)
target_words.append(target_word[:l] + [0] * (target_len - l))
y.append(lines[i + 2].strip().split()[0])
words = lines[i].decode(encoding).lower().split()
words_l, words_r = [], []
flag = 0
puncs = [',', '.', '!', ';', '-', '(']
for word in words:
if word == '$t$':
flag = 1
if flag == 1 and word in puncs:
flag = 2
if flag == 2:
if word in word_to_id:
words_r.append(word_to_id[word])
else:
if word == '$t$':
words_l.extend(target_word)
else:
if word in word_to_id:
words_l.append(word_to_id[word])
if type_ == 'TD' or type_ == 'TC':
words_l = words_l[:sentence_len]
sen_len.append(len(words_l))
x.append(words_l + [0] * (sentence_len - len(words_l)))
tmp = words_r[:sentence_len]
if is_r:
tmp.reverse()
sen_len_r.append(len(tmp))
x_r.append(tmp + [0] * (sentence_len - len(tmp)))
else:
words = words_l + target_word + words_r
sen_len.append(len(words))
x.append(words + [0] * (sentence_len - len(words)))
y = change_y_to_onehot(y)
print(x)
print(x_r)
if type_ == 'TD':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), \
np.asarray(sen_len_r), np.asarray(y)
elif type_ == 'TC':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), np.asarray(sen_len_r), \
np.asarray(y), np.asarray(target_words), np.asarray(tar_len)
else:
return np.asarray(x), np.asarray(sen_len), np.asarray(y)
def extract_aspect_to_id(input_file, aspect2id_file):
"""
Method obtained from Trusca et al. (2020). NOTE: not used in this project
:param input_file:
:param aspect2id_file:
:return:
"""
dest_fp = open(aspect2id_file, 'w')
lines = open(input_file).readlines()
targets = set()
for i in range(0, len(lines), 3):
target = lines[i + 1].lower().split()
targets.add(' '.join(target))
aspect2id = list(zip(targets, range(1, len(lines) + 1)))
for k, v in aspect2id:
dest_fp.write(k + ' ' + str(v) + '\n')
def load_inputs_twitter_at(input_file, word_id_file, aspect_id_file, sentence_len, type_='', encoding='utf8'):
"""
Method obtained from Trusca et al. (2020). NOTE: not used in this project
:param input_file:
:param word_id_file:
:param aspect_id_file:
:param sentence_len:
:param type_:
:param encoding:
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('load word-to-id done!')
if type(aspect_id_file) is str:
aspect_to_id = load_aspect2id(aspect_id_file)
else:
aspect_to_id = aspect_id_file
print('load aspect-to-id done!')
x, y, sen_len = [], [], []
aspect_words = []
lines = open(input_file).readlines()
for i in range(0, len(lines), 3):
aspect_word = ' '.join(lines[i + 1].lower().split())
aspect_words.append(aspect_to_id.get(aspect_word, 0))
y.append(lines[i + 2].split()[0])
words = lines[i].decode(encoding).lower().split()
ids = []
for word in words:
if word in word_to_id:
ids.append(word_to_id[word])
# ids = list(map(lambda word: word_to_id.get(word, 0), words))
sen_len.append(len(ids))
x.append(ids + [0] * (sentence_len - len(ids)))
cnt = 0
for item in aspect_words:
if item > 0:
cnt += 1
print('cnt=', cnt)
y = change_y_to_onehot(y)
for item in x:
if len(item) != sentence_len:
print('aaaaa=', len(item))
x = np.asarray(x, dtype=np.int32)
return x, np.asarray(sen_len), np.asarray(aspect_words), np.asarray(y)
def load_inputs_sentence(input_file, word_id_file, sentence_len, encoding='utf8'):
"""
Method obtained from Trusca et al. (2020). NOTE: not used in this project
:param input_file:
:param word_id_file:
:param sentence_len:
:param encoding:
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('load word-to-id done!')
x, y, sen_len = [], [], []
for line in open(input_file):
line = line.lower().decode('utf8', 'ignore').split('||')
y.append(line[0])
words = ' '.join(line[1:]).split()
xx = []
i = 0
for word in words:
if word in word_to_id:
xx.append(word_to_id[word])
i += 1
if i >= sentence_len:
break
sen_len.append(len(xx))
xx = xx + [0] * (sentence_len - len(xx))
x.append(xx)
y = change_y_to_onehot(y)
print('load input {} done!'.format(input_file))
return np.asarray(x), np.asarray(sen_len), np.asarray(y)
def load_inputs_document(input_file, word_id_file, max_sen_len, max_doc_len, _type=None, encoding='utf8'):
"""
Method obtained from Trusca et al. (2020). NOTE: not used in this project
:param input_file:
:param word_id_file:
:param max_sen_len:
:param max_doc_len:
:param _type:
:param encoding:
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('load word-to-id done!')
x, y, sen_len, doc_len = [], [], [], []
for line in open(input_file):
line = line.lower().decode('utf8', 'ignore').split('||')
# y.append(line[0])
t_sen_len = [0] * max_doc_len
t_x = np.zeros((max_doc_len, max_sen_len))
doc = ' '.join(line[1:])
sentences = doc.split('<sssss>')
i = 0
pre = ''
flag = False
for sentence in sentences:
j = 0
if _type == 'CNN':
sentence = pre + ' ' + sentence
if len(sentence.split()) < 5:
pre = sentence
continue
else:
pre = ''
for word in sentence.split():
if j < max_sen_len:
if word in word_to_id:
t_x[i, j] = word_to_id[word]
j += 1
else:
break
t_sen_len[i] = j
i += 1
flag = True
if i >= max_doc_len:
break
if flag:
doc_len.append(i)
sen_len.append(t_sen_len)
x.append(t_x)
y.append(line[0])
y = change_y_to_onehot(y)
print('load input {} done!'.format(input_file))
return np.asarray(x), np.asarray(y), np.asarray(sen_len), np.asarray(doc_len)
def load_inputs_document_nohn(input_file, word_id_file, max_sen_len, _type=None, encoding='utf8'):
"""
Method obtained from Trusca et al. (2020). NOTE: not used in this project
:param input_file:
:param word_id_file:
:param max_sen_len:
:param _type:
:param encoding:
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('load word-to-id done!')
x, y, sen_len = [], [], []
for line in open(input_file):
line = line.lower().decode('utf8', 'ignore').split('||')
words = ' '.join(line[1:]).split()
i = 0
tx = []
for word in words:
if i < max_sen_len:
if word in word_to_id:
tx.append(word_to_id[word])
i += 1
sen_len.append(i)
x.append(tx + [0] * (max_sen_len - i))
y.append(line[0])
y = change_y_to_onehot(y)
print('load input {} done!'.format(input_file))
return np.asarray(x), np.asarray(y), np.asarray(sen_len)
def load_sentence(src_file, word2id, max_sen_len, freq=5):
"""
Method obtained from Trusca et al. (2020). NOTE: not used in this project
:param src_file:
:param word2id:
:param max_sen_len:
:param freq:
:return:
"""
sf = open(src_file)
x1, x2, len1, len2, y = [], [], [], [], []
def get_q_id(q):
i = 0
tx = []
for word in q:
if i < max_sen_len and word in word2id:
tx.append(word2id[word])
i += 1
tx += ([0] * (max_sen_len - i))
return tx, i
for line in sf:
line = line.lower().split(' || ')
q1 = line[0].split()
q2 = line[1].split()
is_d = line[2][0]
tx, l = get_q_id(q1)
x1.append(tx)
len1.append(l)
tx, l = get_q_id(q2)
x2.append(tx)
len2.append(l)
y.append(is_d)
index = range(len(y))
# np.random.shuffle(index)
x1 = np.asarray(x1, dtype=np.int32)
x2 = np.asarray(x2, dtype=np.int32)
len1 = np.asarray(len1, dtype=np.int32)
len2 = np.asarray(len2, dtype=np.int32)
y = change_y_to_onehot(y)
return x1, x2, len1, len2, y
def load_inputs_full(input_file, word_id_file, sentence_len, type_='', is_r=True, target_len=10, encoding='utf8'):
"""
Method obtained from Trusca et al. (2020). NOTE: not used in this project
:param input_file:
:param word_id_file:
:param sentence_len:
:param type_:
:param is_r:
:param target_len:
:param encoding:
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('load word-to-id done!')
x, y, sen_len = [], [], []
x_r, sen_len_r = [], []
sent_final = []
target_words = []
tar_len = []
lines = open(input_file).readlines()
for i in range(0, len(lines), 3):
words = lines[i + 1].lower().split()
# target_word = map(lambda w: word_to_id.get(w, 0), target_word)
# target_words.append([target_word[0]])
target_word = []
for w in words:
if w in word_to_id:
target_word.append(word_to_id[w])
l = min(len(target_word), target_len)
tar_len.append(l)
target_words.append(target_word[:l] + [0] * (target_len - l))
y.append(lines[i + 2].strip().split()[0])
words = lines[i].lower().split()
words_l, words_r, sent = [], [], []
flag = True
for word in words:
if word == '$t$':
flag = False
continue
if flag:
if word in word_to_id:
words_l.append(word_to_id[word])
else:
if word in word_to_id:
words_r.append(word_to_id[word])
if type_ == 'TD' or type_ == 'TC':
# words_l.extend(target_word)
words_l = words_l[:sentence_len]
words_r = words_r[:sentence_len]
sent.extend(words_l + target_word + words_r)
sen_len.append(len(words_l))
x.append(words_l + [0] * (sentence_len - len(words_l)))
# tmp = target_word + words_r
tmp = words_r
if is_r:
tmp.reverse()
sen_len_r.append(len(tmp))
x_r.append(tmp + [0] * (sentence_len - len(tmp)))
sent_final.append(sent + [0] * (sentence_len - len(sent)))
else:
words = words_l + target_word + words_r
words = words[:sentence_len]
sen_len.append(len(words))
x.append(words + [0] * (sentence_len - len(words)))
y = change_y_to_onehot(y)
if type_ == 'TD':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), \
np.asarray(sen_len_r), np.asarray(y)
elif type_ == 'TC':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), np.asarray(sen_len_r), \
np.asarray(y), np.asarray(target_words), np.asarray(tar_len), np.asarray(sent_final)
elif type_ == 'IAN':
return np.asarray(x), np.asarray(sen_len), | np.asarray(target_words) | numpy.asarray |
import numpy as np
from .qnumber import is_qsparse
__all__ = ['retained_bond_indices', 'split_matrix_svd', 'qr']
def retained_bond_indices(s, tol):
    """
    Return the indices of singular values to keep, given a truncation tolerance.

    The squared, norm-normalized singular values are accumulated from the
    smallest upwards; an index is retained when its cumulative weight
    exceeds `tol`, so only the collectively-negligible tail is discarded.
    """
    total = np.linalg.norm(s)
    # An all-zero spectrum carries no weight: nothing can be retained.
    if total == 0:
        return np.array([], dtype=int)
    # Relative weights of the singular values (sum to 1 by construction).
    weights = np.square(s / total)
    # Accumulate weights from smallest to largest, then scatter the running
    # sums back to their original positions.
    order = np.argsort(weights)
    cumulative = np.empty_like(weights)
    cumulative[order] = np.cumsum(weights[order])
    return np.nonzero(cumulative > tol)[0]
def split_matrix_svd(A, q0, q1, tol):
"""
Split a matrix by singular value decomposition,
taking block sparsity structure dictated by quantum numbers into account,
and truncate small singular values based on tolerance.
"""
assert A.ndim == 2
assert len(q0) == A.shape[0]
assert len(q1) == A.shape[1]
assert is_qsparse(A, [q0, -q1])
# find common quantum numbers
qis = np.intersect1d(q0, q1)
if len(qis) == 0:
assert | np.linalg.norm(A) | numpy.linalg.norm |
import numpy as np
import pandas as pd
import scipy as sp
from scipy import optimize
import matplotlib.pyplot as plt
# Apply seaborn's white-grid look to all figures produced by this module.
# NOTE(review): the 'seaborn-whitegrid' style name was renamed to
# 'seaborn-v0_8-whitegrid' in matplotlib 3.6 — confirm the pinned
# matplotlib version still accepts the old name.
plt.style.use('seaborn-whitegrid')
# Cache matplotlib's default color cycle so plots can reuse consistent colors.
prop_cycle = plt.rcParams["axes.prop_cycle"]
colors = prop_cycle.by_key()["color"]
class OLG:
""" The Class contain the OLG model
"""
#########################
# The M Definitions #
#########################
    def __init__(self, **kwargs):
        """Initialize the OLG model: parameters, primitives, keyword overrides, demographics."""
        self.baseline_parameters() # A struct containing the parameters
        self.primitive_functions()
        self.update_parameters(kwargs)  # keyword overrides (used by interactive widgets)
        self.population_growth() # Gather the natural and predicted growth rate (1950-2100)
    def baseline_parameters(self):
        """Set all baseline (calibrated) parameter values as attributes."""
        # Demographics
        #self.n = 0.51 # Initial growth rate
        #self.N_iniY = 0.35
        #self.N = [self.N_iniY,self.N_iniY/(1+self.n),self.N_iniY/(1+self.n)**2] # Initial population
        # Household
        self.rho = 0.2 # Discount rate
        self.beta = 1/(1+self.rho) # Discount factor
        self.phi = 0.8 # Leisure preference intensity
        # Initial allocation assumption of middle aged
        self.l1_m = 0.4
        self.s1_m = 2
        self.t = 0
        # Production
        self.alpha = 1/3 # Capital share in production
        self.A = 5 # Technology level
        self.delta = 0.2 # Depreciation rate
        # Human Capital
        self.delta_h = 0.072 # Depreciation
        self.theta = 1 # Positive Scale
        self.kappa = 0.8 # HC inheritance share
        self.H = 1
        self.E_share = 0.05
        self.E = self.E_share * 5  # education spending level (share of output scale 5)
        self.epsilon = 0.10 # elasticity of education spending
        self.gamma = 0.70 # elasticity of time allocation (gives weird results)
        # Government
        self.tau_k = 0.35 # Taxation of capital
        self.tau_w = 0.2 # Taxation of labour
        self.pi = 0.3 # Pension contribution rate
        # Code Convenience
        self.k_min = 1e-10
        self.k_max = 20 # Note: Make sure capital wont reach this level and that the convergence is true (not to low density)
        self.seed = 1999
        self.T = 20  # number of simulated periods
        # Plot
        self.Density = 30  # number of grid points on the transition curve
        self.y_axis_min = 0
        self.x_axis_min = 0
    def primitive_functions(self):
        """Define the model's primitive functions (utility, production, HC, wages) as lambdas."""
        eps = 1e-12 # Minimum evaluation; guards log/power against non-positive arguments
        # Utility Function (note: add leisure as a optimal function of c in same period)
        self.u = lambda c,l,h: np.log(np.fmax(c,eps))+self.phi*np.log(np.fmax(l,eps)*np.fmax(h,eps))
        # Production Function (Cobb-Douglas; relies on self.L being set beforehand)
        self.f = lambda k: self.A*np.fmax(k,eps)**self.alpha*self.L**(1-self.alpha)
        self.fprime = lambda k: self.alpha*self.A*np.fmax(k,eps)**(self.alpha-1)*self.L**(1-self.alpha)
        # Human Capital Accumulation: depreciated stock plus education investment
        self.h = lambda h,e: (1-self.delta_h) * h + self.theta * e**self.gamma * h * self.E**self.epsilon
        # Effective Wages: after labour tax and pension contribution
        self.W = lambda w,h,tau_p: (1-self.tau_w-tau_p) * w * h
def update_parameters(self, kwargs):
# For interactive plots (widgets)
for key, value in kwargs.items():
setattr(self, key, value)
##########################
# Gather the growth rate #
##########################
    def population_growth(self):
        """Build the population-growth series and the population matrix self.N_Pop.

        Reads UN World Population Prospects data from a local CSV file
        (side effect: requires 'WPP2019_TotalPopulationBySex.csv' in the
        working directory).
        """
        # a. Import the CSV file with population from UN population division
        df = pd.read_csv('WPP2019_TotalPopulationBySex.csv')
        # b. Choose the World population and Medium-variant projection based
        # NOTE(review): VarID==2 presumably selects the medium variant -- confirm against the WPP codebook
        A = df.loc[df['Location']=='World']
        A = A.loc[df['VarID']==2]
        # c. Find the Growth rate from 1950-2100
        Pop = pd.concat([A['Time'],A['PopTotal']],axis=1)
        Growth = Pop.PopTotal.pct_change().rename('Difference')
        Growth = Growth[1:] # removes the first NaN (150 observation left)
        # d. Find the 25 year average growth rate (compounded over a 25-year generation)
        five_year_growth_rate = Growth.groupby(np.arange(len(Growth))//25).mean()
        self.n_data = (1+five_year_growth_rate.to_numpy())**25-1
        # Setting the last periods to a constant growth rate
        #self.n_data = np.append(self.n_data,[0.02]*(self.T-len(self.n_data)))
        #self.n_data = np.append([0.1,0.18947953,0.33223047,0.50601531],self.n_data)
        # Baseline Model: pad the series with a constant future growth rate
        self.nfuture = -0.06
        self.n_data = self.n_data[:]
        self.n_data = np.append(self.n_data,[self.nfuture]*(self.T+1-len(self.n_data)))
        # Setting the first growth rate
        #self.n = self.n_data[0]
        Init_young = 0.35
        Init_growth = 0.4
        # initial cohort sizes: [young, middle, old]
        self.N = [Init_young,Init_young/(1+Init_growth),Init_young/(1+Init_growth)**2]
        # Creating the Population: roll cohorts forward one generation per period
        self.N_Pop = np.ndarray([self.T+2,3])
        self.N_Pop[0,:]= self.N
        for i in range(self.T+1):
            self.N[2] = self.N[1]
            self.N[1] = self.N[0]
            self.N[0] = self.N[0]*(1+self.n_data[i])
            self.N_Pop[i+1,:] = self.N
#####################
# Solving the Model #
#####################
def Pop_work(self,l0,e0,l1_m,h0):
self.L = self.N[0]*(1-l0-e0)*h0 + self.N[1]*(1-l1_m)*self.h(self.H/self.kappa,0)
return self.L
def solve_firm_problem(self,k,TA,h0,t=0):
# Unpack time allocations
l0, e0 ,l1_m = TA
# Update working population
self.Pop_work(l0,e0,l1_m,h0)
# Interest Rate
R = 1+self.fprime(k)-self.delta
# Wage Rate
w = self.f(k)*(1/self.L)-self.fprime(k)*(k/self.L)
return R, w
    def Household_variables(self,c0,l0,e0,k,k_plus,kg):
        """Derive the full lifecycle outcomes implied by a young household's choices.

        Given consumption c0, leisure l0 and education time e0 of the young,
        plus current/next-period capital, returns
        (h0, h1, h2, l1, c1, c2, s0, s1, tau_p).
        NOTE(review): temporarily advances self.N to next period's population
        to price future factors, then restores it.
        """
        # Gather the middle aged allocations
        l1_m = self.l1_m
        # Human capital over the three life stages
        h0 = self.h(self.H,e0)
        h1 = self.h(h0,0)
        h2 = self.h(h1,0)
        # Growth in h
        # NOTE(review): hg and k_plus2 are computed but never used below
        hg = kg*0 #1.2 # ((self.H/self.kappa)-h0)/h0
        k_plus2 = k_plus*1.04
        # Define Timeallocations (We assume optimal allocation and doesnt change with time)
        TA = [l0,e0,l1_m] # Current time allocations
        # Current and future prices
        tau_p = (self.N[2]/(self.N[0]+self.N[1]))*self.pi
        R0, w0 = self.solve_firm_problem(k,TA,h0)
        # switch to next period's population to compute future prices
        self.N = self.N_Pop[self.t+1,:]
        tau_p1 = (self.N[2]/(self.N[0]+self.N[1]))*self.pi
        h01 = self.h(h0*self.kappa,e0)
        R1, w1 = self.solve_firm_problem(k_plus,TA,h01)
        # prices two periods ahead are approximated by next period's
        R2 = R1
        w2 = w1
        self.N = self.N_Pop[self.t,:]
        # Future pension benefits
        # NOTE(review): h1_mid is computed but never used below
        h1_mid = self.h(self.H/self.kappa,0)
        Pens2 = self.pi * w2 * (h1 * (1-l1_m) + h0 * (1-l0-e0))
        # Find leisure middle age (Optimal rule used)
        W0 = self.W(w0, h0, tau_p)
        W1 = self.W(w1, h1, tau_p1)
        l1 = self.beta * (1+R1*(1-self.tau_k)) * (W0/W1)* l0
        # Define Consumption middle age (Euler equation)
        c1 = self.beta * (1+R1*(1-self.tau_k)) * c0
        # Define savings for the two periods (budget constraints)
        s0 = (1-self.tau_w-tau_p) * (1-l0-e0) * w0 * h0 - c0
        s1 = (1+R1*(1-self.tau_k))*s0 + (1-self.tau_w-tau_p1)*(1-l1)*w1*h1-c1
        # Define consumption in the last period (savings plus pension)
        c2 = (1+R2*(1-self.tau_k))*s1+Pens2
        return h0, h1, h2, l1, c1, c2, s0, s1, tau_p
def lifetime_utility_young(self,x,k,k_plus,kg):
# Unpack the allocation parameters
c0 = x[0]
l0 = x[1]
e0 = x[2]
# gather the implication of the choices
I = self.Household_variables(c0,l0,e0,k,k_plus,kg)
# Find human capital initial
h0 = I[0]
h1 = I[1]
h2 = I[2]
# Future leisure
l1 = I[3]
l2 = 1
# Future consumption
c1 = I[4]
c2 = I[5]
U = self.u(c0,l0,h0)+self.beta*self.u(c1,l1,h1) + self.beta**2*self.u(c2,l2,h2)
return -U
    def solve_household_problem(self,k,k_plus):
        """Solve the young household's constrained utility maximization.

        Returns (S, s0, s1, c0, l0, e0, Answer) where S is aggregate savings
        given current capital k and next-period capital k_plus.
        """
        # Assume we are in steady state
        kg = ((k_plus-k)/k)#((self.N[0]-self.N[1])/self.N[0])
        # cap extreme implied capital growth
        if kg >=2:
            kg = 1
        # Initial Guess (c0, l0, e0)
        x0 = [1,0.2,0.2]
        # Bounds
        bound_c = (0,k)
        bound_l = (0,0.9)
        bound_e = (0,1)
        bnds = (bound_c,bound_l,bound_e)
        # Constraints
        def constraint1(x,k):
            # Constraint c to be maximum equal wage ( w >= c)
            TA = [x[1],x[2],self.l1_m]
            h0 = self.h(self.H,x[2])
            return self.solve_firm_problem(k,TA,h0)[1]*(1-self.tau_w)-x[0]
        def constraint2(x): # (1 >= l + e)
            return 1-x[1]-x[2]
        con1 = {'type': 'ineq', 'args': (k, ), 'fun':constraint1}
        con2 = {'type': 'ineq', 'fun':constraint2}
        cons = [con1,con2]
        # Optimization (SLSQP handles the inequality constraints)
        result = optimize.minimize(self.lifetime_utility_young, x0, method = "SLSQP",\
                                   args = (k, k_plus,kg, ), bounds = bnds, constraints=cons)
        # a. Unpack
        c0,l0,e0 = result.x
        # b. Gather the savings
        Answer = self.Household_variables(c0,l0,e0,k,k_plus,kg)
        s0 = Answer[6]
        s1 = self.s1_m # current saving of middle aged
        # e. Aggregated savings
        S = s0 * self.N[0] + s1*self.N[1]
        return S, s0, s1, c0, l0, e0, Answer
def find_equilibrium(self, k_plus,disp=0):
# b objective function to minimize
def obj(k):
# saving
S = self.solve_household_problem(k,k_plus)[0]
# deviation of capital to day vs tomorrow
return (k_plus-S)**2
k_min = 0
k_max = self.k_max+1
k = optimize.fminbound(obj,k_min,k_max,disp=disp)
# Update mid age
return k
##############################
# Find the transition Curve #
##############################
def find_transition_curve(self):
# a determine the k_plus grid as all possible points
self.k_plus_grid = np.linspace(self.k_min, self.k_max, self.Density)
# b. implid current capital
self.k_grid = np.empty(self.Density)
for i, k_plus in enumerate(self.k_plus_grid):
k = self.find_equilibrium(k_plus)
self.k_grid[i] = k
#########################
# Simulating the Model #
#########################
def simulate(self, reset_seed=True, k_initial=1, shock = False, shock_permanent = True):
if reset_seed:
np.random.seed(self.seed)
self.find_transition_curve()
# a. initialize
# Capital and output
self.sim_k = np.empty(self.T)
self.sim_k[0] = k_initial
self.y_output = np.empty(self.T)
self.y_output[0] = self.f(k_initial)
# Population
self.pop = np.empty(self.T)
self.pop[0] = np.sum(self.N_Pop[0,:])
self.sim_n = np.empty(self.T)
self.sim_n[0] = self.n_data[0]
#self.N_overview = np.ndarray((self.T,3))
#self.N_overview[0,:] = self.N
# Variables at interest
self.sim_k_plus = np.empty(self.T)
self.sim_w = np.empty(self.T)
self.sim_r = np.empty(self.T)
self.sim_s0 = np.empty(self.T)
self.sim_s1 = np.empty(self.T)
self.sim_c0 = np.empty(self.T)
self.sim_c1 = np.empty(self.T)
self.sim_c2 = np.empty(self.T)
self.sim_l0 = np.empty(self.T)
self.sim_l1 = np.empty(self.T)
self.sim_e0 = np.empty(self.T)
self.sim_h0 = | np.empty(self.T) | numpy.empty |
"""Functions for DSC processing.
Created 15 October 2021
@authors: <NAME>
@institution: Barrow Neurological Institute
Translated from jupyter notebook DSCpipeline.ipynb by <NAME>
Functions:
estimate_R2s
estimate_delta_R2s
estimate_delta_R2s_dual_echo
"""
import numpy as np
def estimate_R2s(s1, s2, te1, te2):
"""Estimate R2* and TE=0 signal from dual echo signals.
Parameters
----------
s1 : float
Signal (first echo).
s2 : float
Signal (second echo).
te1 : float
First echo time (s).
te2 : float
Second echo time (s).
Returns
-------
s_te0 : float
Signal at zero echo time.
R2s : float
R2* (s^-1).
"""
R2s = (1/(te2 - te1)) * np.log(s1 / s2)
s_te0 = s1 * | np.exp(te1 * R2s) | numpy.exp |
import openpnm as op
import numpy as _np
from numpy.testing import assert_allclose
from openpnm.utils import remove_prop_deep
class HydraulicConductanceTest:
def setup_class(self):
self.net = op.network.Cubic(shape=[5, 5, 5], spacing=1.0)
self.geo = op.geometry.GenericGeometry(network=self.net,
pores=self.net.Ps,
throats=self.net.Ts)
self.geo['pore.diameter'] = 1.0
self.geo['throat.diameter'] = 0.5
self.geo['pore.area'] = 1.0
self.geo['throat.cross_sectional_area'] = 0.5
self.phase = op.phase.GenericPhase(network=self.net)
self.phase['pore.viscosity'] = 1e-5
self.phys = op.physics.GenericPhysics(network=self.net,
phase=self.phase,
geometry=self.geo)
self.size_factors_dict = {"pore1": 0.123, "throat": 0.981, "pore2": 0.551}
def teardown_class(self):
mgr = op.Workspace()
mgr.clear()
def test_generic_hydraulic_size_factors_as_dict(self):
self.geo['throat.hydraulic_size_factors'] = self.size_factors_dict
mod = op.models.physics.hydraulic_conductance.generic_hydraulic
self.phys.add_model(propname='throat.g_hydraulic_conductance', model=mod)
self.phys.regenerate_models()
actual = self.phys['throat.g_hydraulic_conductance'].mean()
assert_allclose(actual, desired=9120.483231751232)
remove_prop_deep(self.geo, "throat.hydraulic_size_factors")
def test_generic_hydraulic_size_factors_as_array(self):
self.geo['throat.hydraulic_size_factors'] = 0.896
self.phys.regenerate_models("throat.g_hydraulic_conductance")
actual = self.phys['throat.g_hydraulic_conductance'].mean()
| assert_allclose(actual, desired=89600.0) | numpy.testing.assert_allclose |
# finufft module, ie python-user-facing access to (no-data-copy) interfaces
#
# Some default opts are stated here (in arg list, but not docstring).
# Barnett 10/31/17: changed all type-2 not to have ms,etc as an input but infer
# from size of f.
# Barnett 2018?: google-style docstrings for napoleon.
# Lu 03/10/20: added guru interface calls
# Anden 8/18/20: auto-made docstrings for the 9 simple/many routines
import numpy as np
import warnings
import numbers
from ctypes import byref
from ctypes import c_longlong
from ctypes import c_void_p
import finufft._finufft as _finufft
### Plan class definition
class Plan:
    r"""
    A non-uniform fast Fourier transform (NUFFT) plan
    The ``Plan`` class lets the user exercise more fine-grained control over
    the execution of an NUFFT. First, the plan is created with a certain set
    of parameters (type, mode configuration, tolerance, sign, number of
    simultaneous transforms, and so on). Then the nonuniform points are set
    (source or target depending on the type). Finally, the plan is executed on
    some data, yielding the desired output.
    In the simple interface, all these steps are executed in a single call to
    the ``nufft*`` functions. The benefit of separating plan creation from
    execution is that it allows for plan reuse when certain parameters (like
    mode configuration) or nonuniform points remain the same between different
    NUFFT calls. This becomes especially important for small inputs, where
    execution time may be dominated by initialization steps such as allocating
    and FFTW plan and sorting the nonuniform points.
    Example:
        ::
            import numpy as np
            import finufft
            # set up parameters
            n_modes = (1000, 2000)
            n_pts = 100000
            nufft_type = 1
            n_trans = 4
            # generate nonuniform points
            x = 2 * np.pi * np.random.uniform(size=n_pts)
            y = 2 * np.pi * np.random.uniform(size=n_pts)
            # generate source strengths
            c = (np.random.standard_normal(size=(n_trans, n_pts)),
                 + 1J * np.random.standard_normal(size=(n_trans, n_pts)))
            # initialize the plan
            plan = finufft.Plan(nufft_type, n_modes, n_trans)
            # set the nonuniform points
            plan.setpts(x, y)
            # execute the plan
            f = plan.execute(c)
    Also see ``python/examples/guru1d1.py`` and ``python/examples/guru2d1.py``.
    Args:
        nufft_type      (int): type of NUFFT (1, 2, or 3).
        n_modes_or_dim  (int or tuple of ints): if ``nufft_type`` is 1 or 2,
                        this should be a tuple specifying the number of modes
                        in each dimension (for example, ``(50, 100)``),
                        otherwise, if `nufft_type`` is 3, this should be the
                        number of dimensions (between 1 and 3).
        n_trans         (int, optional): number of transforms to compute
                        simultaneously.
        eps             (float, optional): precision requested (>1e-16).
        isign           (int, optional): if non-negative, uses positive sign
                        exponential, otherwise negative sign.
        **kwargs        (optional): for more options, see :ref:`opts`.
    """
    def __init__(self,nufft_type,n_modes_or_dim,n_trans=1,eps=1e-6,isign=None,**kwargs):
        # set default isign based on if isign is None
        if isign==None:
            if nufft_type==2:
                isign = -1
            else:
                isign = 1
        # set opts and check precision type
        opts = _finufft.NufftOpts()
        _finufft._default_opts(opts)
        # setkwopts is a module-level helper (defined elsewhere in this file);
        # presumably it applies kwargs to opts and returns True for single precision
        is_single = setkwopts(opts,**kwargs)
        # construct plan based on precision type and eps default value
        plan = c_void_p(None)
        # setting n_modes and dim for makeplan
        n_modes = np.ones([3], dtype=np.int64)
        if nufft_type==3:
            # for type 3 the second argument is the dimension itself
            npdim = np.asarray(n_modes_or_dim, dtype=np.int64)
            if npdim.size != 1:
                raise RuntimeError('FINUFFT type 3 plan n_modes_or_dim must be one number, the dimension')
            dim = int(npdim)
        else:
            npmodes = np.asarray(n_modes_or_dim, dtype=np.int64)
            if npmodes.size>3 or npmodes.size<1:
                raise RuntimeError("FINUFFT n_modes dimension must be 1, 2, or 3")
            dim = int(npmodes.size)
            # axis order is reversed before handing modes to the C layer
            # (matches the axis swap done in setpts below)
            n_modes[0:dim] = npmodes[::-1]
        n_modes = (c_longlong * 3)(*n_modes)
        # bind the precision-specific C entry points once, up front
        if is_single:
            self._makeplan = _finufft._makeplanf
            self._setpts = _finufft._setptsf
            self._execute = _finufft._executef
            self._destroy = _finufft._destroyf
        else:
            self._makeplan = _finufft._makeplan
            self._setpts = _finufft._setpts
            self._execute = _finufft._execute
            self._destroy = _finufft._destroy
        ier = self._makeplan(nufft_type, dim, n_modes, isign, n_trans, eps,
                             byref(plan), opts)
        # check error
        if ier != 0:
            err_handler(ier)
        # set C++ side plan as inner_plan
        self.inner_plan = plan
        # set properties
        self.type = nufft_type
        self.dim = dim
        self.n_modes = n_modes
        self.n_trans = n_trans
        self.is_single = is_single
    ### setpts
    def setpts(self,x=None,y=None,z=None,s=None,t=None,u=None):
        r"""
        Set the nonuniform points
        For type 1, this sets the coordinates of the ``M`` nonuniform source
        points, for type 2, it sets the coordinates of the ``M`` target
        points, and for type 3 it sets both the ``M`` source points and the
        ``N`` target points.
        The dimension of the plan determines the number of arguments supplied.
        For example, if ``dim == 2``, we provide ``x`` and ``y`` (as well as
        ``s`` and ``t`` for a type-3 transform).
        Args:
            x       (float[M]): first coordinate of the nonuniform points
                    (source for type 1 and 3, target for type 2).
            y       (float[M], optional): second coordinate of the nonuniform
                    points (source for type 1 and 3, target for type 2).
            z       (float[M], optional): third coordinate of the nonuniform
                    points (source for type 1 and 3, target for type 2).
            s       (float[N], optional): first coordinate of the nonuniform
                    points (target for type 3).
            t       (float[N], optional): second coordinate of the nonuniform
                    points (target for type 3).
            u       (float[N], optional): third coordinate of the nonuniform
                    points (target for type 3).
        """
        # the arrays are kept as attributes so the memory stays alive for
        # the lifetime of the plan (the C layer keeps pointers into them)
        if self.is_single:
            # array sanity check
            self._xj = _rchkf(x)
            self._yj = _rchkf(y)
            self._zj = _rchkf(z)
            self._s = _rchkf(s)
            self._t = _rchkf(t)
            self._u = _rchkf(u)
        else:
            # array sanity check
            self._xj = _rchk(x)
            self._yj = _rchk(y)
            self._zj = _rchk(z)
            self._s = _rchk(s)
            self._t = _rchk(t)
            self._u = _rchk(u)
        # valid sizes
        dim = self.dim
        tp = self.type
        (self.nj, self.nk) = valid_setpts(tp, dim, self._xj, self._yj, self._zj, self._s, self._t, self._u)
        # call set pts for single prec plan
        # note the axis-order swap per dimension, mirroring the mode
        # reversal done in __init__
        if self.dim == 1:
            ier = self._setpts(self.inner_plan, self.nj, self._xj, self._yj, self._zj, self.nk, self._s, self._t, self._u)
        elif self.dim == 2:
            ier = self._setpts(self.inner_plan, self.nj, self._yj, self._xj, self._zj, self.nk, self._t, self._s, self._u)
        elif self.dim == 3:
            ier = self._setpts(self.inner_plan, self.nj, self._zj, self._yj, self._xj, self.nk, self._u, self._t, self._s)
        else:
            raise RuntimeError("FINUFFT dimension must be 1, 2, or 3")
        if ier != 0:
            err_handler(ier)
    ### execute
    def execute(self,data,out=None):
        r"""
        Execute the plan
        Performs the NUFFT specified at plan instantiation with the points set
        by ``setpts``. For type-1 and type-3 transforms, the input is a set of
        source strengths, while for a type-2 transform, it consists of an
        array of size ``n_modes``. If ``n_transf`` is greater than one,
        ``n_transf`` inputs are expected, stacked along the first axis.
        Args:
            data    (complex[M], complex[n_transf, M], complex[n_modes], or complex[n_transf, n_modes]): The input source strengths
                    (type 1 and 3) or source modes (type 2).
            out     (complex[n_modes], complex[n_transf, n_modes], complex[M], or complex[n_transf, M], optional): The array where the
                    output is stored. Must be of the right size.
        Returns:
            complex[n_modes], complex[n_transf, n_modes], complex[M], or complex[n_transf, M]: The output array of the transform(s).
        """
        if self.is_single:
            _data = _cchkf(data)
            _out = _cchkf(out)
        else:
            _data = _cchk(data)
            _out = _cchk(out)
        tp = self.type
        n_trans = self.n_trans
        nj = self.nj
        nk = self.nk
        dim = self.dim
        if tp==1 or tp==2:
            ms = self.n_modes[0]
            mt = self.n_modes[1]
            mu = self.n_modes[2]
        # input shape and size check
        if tp==2:
            valid_fshape(data.shape,n_trans,dim,ms,mt,mu,None,2)
        else:
            valid_cshape(data.shape,nj,n_trans)
        # out shape and size check
        if out is not None:
            if tp==1:
                valid_fshape(out.shape,n_trans,dim,ms,mt,mu,None,1)
            if tp==2:
                valid_cshape(out.shape,nj,n_trans)
            if tp==3:
                valid_fshape(out.shape,n_trans,dim,None,None,None,nk,3)
        # allocate out if None
        if out is None:
            if self.is_single:
                pdtype=np.complex64
            else:
                pdtype=np.complex128
            # squeeze drops the leading axis when n_trans == 1
            if tp==1:
                _out = np.squeeze(np.zeros([n_trans, mu, mt, ms], dtype=pdtype, order='C'))
            if tp==2:
                _out = np.squeeze(np.zeros([n_trans, nj], dtype=pdtype, order='C'))
            if tp==3:
                _out = np.squeeze(np.zeros([n_trans, nk], dtype=pdtype, order='C'))
        # call execute based on type and precision type
        # (for type 2 the roles of the two data pointers are swapped)
        if tp==1 or tp==3:
            ier = self._execute(self.inner_plan,
                                _data.ctypes.data_as(c_void_p),
                                _out.ctypes.data_as(c_void_p))
        elif tp==2:
            ier = self._execute(self.inner_plan,
                                _out.ctypes.data_as(c_void_p),
                                _data.ctypes.data_as(c_void_p))
        else:
            ier = 10
        # check error
        if ier != 0:
            err_handler(ier)
        # return out
        if out is None:
            return _out
        else:
            # copy into the caller-supplied buffer if a conversion copy was made
            _copy(_out,out)
            return out
    def __del__(self):
        # destroy() is presumably a module-level helper defined later in this
        # file that frees the C++-side plan -- confirm before refactoring
        destroy(self)
        self.inner_plan = None
### End of Plan class definition
### <NAME>'s functions for checking input and output variables
def _rchk(x):
"""
Check if array x is of the appropriate type
(float64, C-contiguous in memory)
If not, produce a copy
"""
if x is not None and x.dtype is not np.dtype('float64'):
raise RuntimeError('FINUFFT data type must be float64 for double precision, data may have mixed precision types')
return np.array(x, dtype=np.float64, order='C', copy=False)
def _cchk(x):
"""
Check if array x is of the appropriate type
(complex128, C-contiguous in memory)
If not, produce a copy
"""
if x is not None and (x.dtype is not np.dtype('complex128') and x.dtype is not np.dtype('float64')):
raise RuntimeError('FINUFFT data type must be complex128 for double precision, data may have mixed precision types')
return np.array(x, dtype=np.complex128, order='C', copy=False)
def _rchkf(x):
"""
Check if array x is of the appropriate type
(float64, C-contiguous in memory)
If not, produce a copy
"""
if x is not None and x.dtype is not np.dtype('float32'):
raise RuntimeError('FINUFFT data type must be float32 for single precision, data may have mixed precision types')
return np.array(x, dtype=np.float32, order='C', copy=False)
def _cchkf(x):
"""
Check if array x is of the appropriate type
(complex128, C-contiguous in memory)
If not, produce a copy
"""
if x is not None and (x.dtype is not np.dtype('complex64') and x.dtype is not np.dtype('float32')):
raise RuntimeError('FINUFFT data type must be complex64 for single precision, data may have mixed precision types')
return np.array(x, dtype=np.complex64, order='C', copy=False)
def _copy(_x, x):
"""
Copy _x to x, only if the underlying data of _x differs from that of x
"""
if _x.data != x.data:
x[:] = _x
### error handler (keep up to date with FINUFFT/include/defs.h)
def err_handler(ier):
    """Translate a FINUFFT C-library error code into a warning or RuntimeError."""
    messages = {
        1: 'FINUFFT eps tolerance too small to achieve',
        2: 'FINUFFT malloc size requested greater than MAX_NF',
        3: 'FINUFFT spreader fine grid too small compared to kernel width',
        4: 'FINUFFT spreader nonuniform point out of range [-3pi,3pi]^d in type 1 or 2',
        5: 'FINUFFT spreader malloc error',
        6: 'FINUFFT spreader illegal direction (must be 1 or 2)',
        7: 'FINUFFT opts.upsampfac not > 1.0',
        8: 'FINUFFT opts.upsampfac not a value with known Horner polynomial rule',
        9: 'FINUFFT number of transforms ntrans invalid',
        10: 'FINUFFT transform type invalid',
        11: 'FINUFFT general malloc failure',
        12: 'FINUFFT number of dimensions dim invalid',
        13: 'FINUFFT spread_thread option invalid',
    }
    message = messages.get(ier, 'Unknown error')
    # code 1 is a soft failure (accuracy warning only); everything else is fatal
    if ier == 1:
        warnings.warn(message, Warning)
    else:
        raise RuntimeError(message)
### valid sizes when setpts
def valid_setpts(tp, dim, x, y, z, s, t, u):
    """Check the nonuniform point arrays and return their lengths (nj, nk)."""
    if x.ndim != 1:
        raise RuntimeError('FINUFFT x must be a vector')
    nj = x.size
    nk = 0
    if tp == 3:
        nk = s.size
        if s.ndim != 1:
            raise RuntimeError('FINUFFT s must be a vector')
    # higher-dimension coordinates: (threshold, source coord, target coord, names)
    for min_dim, src, tgt, src_name, tgt_name in (
            (1, y, t, 'y', 't'),
            (2, z, u, 'z', 'u')):
        if dim > min_dim:
            if src.ndim != 1:
                raise RuntimeError('FINUFFT %s must be a vector' % src_name)
            if src.size != nj:
                raise RuntimeError('FINUFFT %s must have same length as x' % src_name)
            if tp == 3:
                if tgt.ndim != 1:
                    raise RuntimeError('FINUFFT %s must be a vector' % tgt_name)
                if tgt.size != nk:
                    raise RuntimeError('FINUFFT %s must have same length as s' % tgt_name)
    return (nj, nk)
### ntransf for type 1 and type 2
def valid_ntr_tp12(dim, shape, n_transin, n_modesin):
    """Deduce (n_trans, n_modes) from an array shape for type 1/2 transforms."""
    rank = len(shape)
    if rank == dim + 1:
        # leading axis stacks the transforms
        n_trans, n_modes = shape[0], shape[1:dim + 1]
    elif rank == dim:
        n_trans, n_modes = 1, shape
    else:
        raise RuntimeError('FINUFFT type 1 output dimension or type 2 input dimension must be either dim or dim+1(n_trans>1)')
    if n_transin is not None and n_trans != n_transin:
        raise RuntimeError('FINUFFT input n_trans and output n_trans do not match')
    if n_modesin is not None and n_modes != n_modesin:
        raise RuntimeError('FINUFFT input n_modes and output n_modes do not match')
    return (n_trans, n_modes)
### valid number of transforms
def valid_ntr(x, c):
    """Infer the number of stacked transforms from strengths c and points x."""
    n_trans, remainder = divmod(c.size, x.size)
    if remainder:
        raise RuntimeError('FINUFFT c.size must be divisible by x.size')
    valid_cshape(c.shape, x.size, n_trans)
    return n_trans
### valid shape of c
def valid_cshape(cshape, xsize, n_trans):
    """Validate the shape of a strengths array c against x.size and n_trans."""
    if n_trans == 1:
        # single transform: c is a plain vector matching x
        if len(cshape) != 1:
            raise RuntimeError('FINUFFT c.ndim must be 1 if n_trans = 1')
        if cshape[0] != xsize:
            raise RuntimeError('FINUFFT c.size must be same as x.size if n_trans = 1')
    elif n_trans > 1:
        # stacked transforms: c must be (n_trans, x.size)
        if len(cshape) != 2:
            raise RuntimeError('FINUFFT c.ndim must be 2 if n_trans > 1')
        if cshape[0] != n_trans or cshape[1] != xsize:
            raise RuntimeError('FINUFFT c.shape must be (n_trans, x.size) if n_trans > 1')
### valid shape of f
def valid_fshape(fshape, n_trans, dim, ms, mt, mu, nk, tp):
    """Validate the shape of a modes/values array f for the given transform type."""
    if tp == 3:
        # type 3: f holds nk target-point values, optionally stacked over transforms
        if n_trans == 1:
            if len(fshape) != 1:
                raise RuntimeError('FINUFFT f.ndim must be 1 for type 3 if n_trans = 1')
            if fshape[0] != nk:
                raise RuntimeError('FINUFFT f.size of must be nk if n_trans = 1')
        if n_trans > 1:
            if len(fshape) != 2:
                raise RuntimeError('FINUFFT f.ndim must be 2 for type 3 if n_trans > 1')
            if fshape[1] != nk or fshape[0] != n_trans:
                raise RuntimeError('FINUFFT f.shape must be (n_trans, nk) if n_trans > 1')
        return
    # types 1 and 2: f holds the mode grid, optionally stacked over transforms
    if n_trans == 1:
        if len(fshape) != dim:
            raise RuntimeError('FINUFFT f.ndim must be same as the problem dimension for type 1 or 2 if n_trans = 1')
    if n_trans > 1:
        if len(fshape) != dim + 1:
            raise RuntimeError('FINUFFT f.ndim must be same as the problem dimension + 1 for type 1 or 2 if n_trans > 1')
        if fshape[0] != n_trans:
            raise RuntimeError('FINUFFT f.shape[0] must be n_trans for type 1 or 2 if n_trans > 1')
    # trailing axes must agree with the requested mode counts (fastest axis last)
    for axis, modes in zip((-1, -2, -3), (ms, mt, mu)[:dim]):
        if fshape[axis] != modes:
            raise RuntimeError('FINUFFT f.shape is not consistent with n_modes')
### check if dtype is single or double
def is_single_dtype(dtype):
dtype = np.dtype(dtype)
if dtype == np.dtype('float64') or dtype == | np.dtype('complex128') | numpy.dtype |
"""
functions to handle data loading
TO DO:
data augmentation for xyz coordinates
data augmentation by adding gaussian noise
"""
import numpy as np
import copy
import tensorflow as tf
from collections import Counter
from numpy.linalg import norm
class Data_Loader:
input_mean = None
input_std = None
output_mean = None
output_std = None
datatype = np.float32
def __init__(
self,
filename="data.npz",
shuffle=True,
input_label=["xyz"],
target_label=["pe"],
n_sample=10000,
test_sample=1000,
batch_size=100,
num_epoch=10,
weight_by_pe=True,
weight_by_label=True,
ngrid=100,
test_only=False,
input_norm=True,
output_norm=True,
):
"""load the data from npz file
:param filename: str, root dir of .npy data
:param shuffle, boolean,
whether or not to shuffle training data
:param input_label: list of str that define the input
:param target_label: list of str that define the output
:param n_sample: int, number of training samples to use
"""
# load data from data.npz
data = dict(np.load(filename))
for k in input_label + target_label:
data[k] = data[k].astype(self.datatype)
n_config = data[input_label[0]].shape[0]
# if n_sample is too big
if (n_sample+test_sample) > n_config:
if test_sample < n_config:
n_sample = n_config - test_sample
else:
n_sample = n_config //2
test_sample = n_config - n_sample
# shuffle data and target with the same permutation
if shuffle:
r = np.random.permutation(n_config)
else:
r = np.arange(n_config)
for k in data.keys():
np.take(data[k], r, axis=0, out=data[k])
data[k] = data[k][:n_sample+test_sample]
if "label" in data.keys():
minlabel = np.min(data["label"])
if minlabel != 0:
print(f"WARNING, the data label will be shifted {minlabel}")
data["label"] = | np.int32(data["label"] - minlabel) | numpy.int32 |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for cartesian_4d_velocity_effector.py."""
import copy
from absl.testing import absltest
from absl.testing import parameterized
from dm_control import mjcf
from dm_robotics.geometry import geometry
from dm_robotics.geometry import mujoco_physics
from dm_robotics.moma.effectors import cartesian_4d_velocity_effector
from dm_robotics.moma.effectors import test_utils
from dm_robotics.moma.models.robots.robot_arms import sawyer
import numpy as np
class Cartesian4DVelocityEffectorTest(parameterized.TestCase):
  def test_zero_xy_rot_vel_pointing_down(self):
    """A straight-down arm should receive no X/Y rotation corrections."""
    # When the arm is pointing straight down, the default effector shouldn't
    # apply any X or Y rotations.
    arm = sawyer.Sawyer(with_pedestal=False)
    physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
    # Spy6dEffector records the last 6D command it receives (see test_utils).
    effector_6d = test_utils.Spy6dEffector(arm.wrist_site)
    effector_4d = cartesian_4d_velocity_effector.Cartesian4dVelocityEffector(
        effector_6d, element=arm.wrist_site, effector_prefix='sawyer_4d')
    arm.set_joint_angles(
        physics, joint_angles=test_utils.SAFE_SAWYER_JOINTS_POS)
    physics.step()  # propagate the changes to the rest of the physics.
    # Send an XYZ + Z rot command. We shouldn't see any XY rotation components.
    effector_4d.set_control(physics, command=np.ones(4) * 0.1)
    np.testing.assert_allclose(effector_6d.previous_action,
                               [0.1, 0.1, 0.1, 0.0, 0.0, 0.1],
                               atol=1e-3, rtol=0.0)
  def test_nonzero_xy_rot_vel_not_pointing_down(self):
    """A perturbed arm pose should trigger corrective X/Y rotations."""
    # When the arm is NOT pointing straight down, the default effector should
    # apply X and Y rotations to push it back to the desired quat.
    arm = sawyer.Sawyer(with_pedestal=False)
    physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
    effector_6d = test_utils.Spy6dEffector(arm.wrist_site)
    effector_4d = cartesian_4d_velocity_effector.Cartesian4dVelocityEffector(
        effector_6d, element=arm.wrist_site, effector_prefix='sawyer_4d')
    # random offset to all joints.
    joint_angles = test_utils.SAFE_SAWYER_JOINTS_POS + 0.1
    arm.set_joint_angles(physics, joint_angles=joint_angles)
    physics.step()  # propagate the changes to the rest of the physics.
    # Send an XYZ + Z rot command. We SHOULD see XY rotation components.
    effector_4d.set_control(physics, command=np.ones(4) * 0.1)
    # indices 3:5 of the forwarded 6D action are the X and Y rotations
    xy_rot_components = effector_6d.previous_action[3:5]
    self.assertFalse(np.any(np.isclose(xy_rot_components, np.zeros(2))))
  def test_limiting_to_workspace(self):
    """Workspace limits should zero only the DOFs moving out of bounds."""
    arm = sawyer.Sawyer(with_pedestal=False)
    physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
    effector_6d = test_utils.Spy6dEffector(arm.wrist_site)
    effector_4d = cartesian_4d_velocity_effector.Cartesian4dVelocityEffector(
        effector_6d, element=arm.wrist_site, effector_prefix='sawyer_4d')
    arm.set_joint_angles(
        physics, joint_angles=test_utils.SAFE_SAWYER_JOINTS_POS)
    physics.step()  # propagate the changes to the rest of the physics.
    # The arm is pointing down in front of the base. Create a workspace
    # that encompasses it, and check that all commands are valid.
    min_workspace_limits = np.array([0.0, -0.5, 0.0])
    max_workspace_limits = np.array([0.9, 0.5, 0.5])
    effector_with_limits = cartesian_4d_velocity_effector.limit_to_workspace(
        effector_4d, arm.wrist_site, min_workspace_limits, max_workspace_limits)
    effector_with_limits.set_control(physics, command=np.ones(4) * 0.1)
    np.testing.assert_allclose(effector_6d.previous_action,
                               [0.1, 0.1, 0.1, 0.0, 0.0, 0.1],
                               atol=1e-3, rtol=0.0)
    # The arm is pointing down in front of the base. Create a workspace
    # where the X position is in bounds, but Y and Z are out of bounds.
    min_workspace_limits = np.array([0.0, -0.9, 0.5])
    max_workspace_limits = np.array([0.9, -0.5, 0.9])
    effector_with_limits = cartesian_4d_velocity_effector.limit_to_workspace(
        effector_4d, arm.wrist_site, min_workspace_limits, max_workspace_limits)
    # The action should only affect DOFs that are out of bounds and are moving
    # away from where they should.
    effector_with_limits.set_control(physics, command=np.ones(4) * 0.1)
    np.testing.assert_allclose(effector_6d.previous_action,
                               [0.1, 0.0, 0.1, 0.0, 0.0, 0.1],
                               atol=1e-3, rtol=0.0)
def test_limiting_wrist_rotation(self):
arm = sawyer.Sawyer(with_pedestal=False)
physics = mjcf.Physics.from_mjcf_model(arm.mjcf_model)
effector_6d = test_utils.Spy6dEffector(arm.wrist_site)
effector_4d = cartesian_4d_velocity_effector.Cartesian4dVelocityEffector(
effector_6d, element=arm.wrist_site, effector_prefix='sawyer_4d')
arm.set_joint_angles(
physics, joint_angles=test_utils.SAFE_SAWYER_JOINTS_POS)
physics.step() # propagate the changes to the rest of the physics.
# The arm is pointing down in front of the base. Create a workspace
# that encompasses it.
min_workspace_limits = | np.array([0.0, -0.5, 0.0]) | numpy.array |
# -*- coding: utf-8 -*-
"""World Fertility Survey: Fiji"""

# Module-level metadata strings for the dataset; `load` is defined below.
__all__ = ['COPYRIGHT','TITLE','SOURCE','DESCRSHORT','DESCRLONG','NOTE', 'load']
__docformat__ = 'restructuredtext'

COPYRIGHT = """Available for use in academic research. See SOURCE."""
TITLE = __doc__
SOURCE = """
The source data was obtained from <NAME>'s web site at Princeton
http://data.princeton.edu/wws509/datasets/#ceb, with the following reference.
::
    Little, <NAME>. (1978). Generalized Linear Models for Cross-Classified Data
    from the WFS. World Fertility Survey Technical Bulletins, Number 5.
It originally comes from the World Fertility Survey for Fiji
http://opr.princeton.edu/archive/wfs/fj.aspx.
The terms of use for the original dataset are:
Data may be used for academic research, provided that credit is given in any
publication resulting from the research to the agency that conducted the
survey and that two copies of any publication are sent to::
    Mr. <NAME>
    Government Statistician
    Bureau of Statistics
    Government Buildings
    P.O. Box 2221
    Suva
    Fiji
"""

DESCRSHORT = """Fiji Fertility Survey"""

DESCRLONG = """World Fertility Surveys: Fiji Fertility Survey.
Data represents grouped individual data."""

#suggested notes
NOTE = """
Number of observations - 70
Number of variables - 7
Variable name definitions::
    totchild - total number of children ever born in the group
    dur - marriage duration (1=0-4, 2=5-9, 3=10-14, 4=15-19, 5=20-24,
          6=25-29)
    res - residence (1=Suva, 2=Urban, 3=Rural)
    edu - education (1=none, 2=lower primary, 3=upper primary,
          4=secondary+)
    nwomen - number of women in the group
"""
from numpy import recfromtxt, column_stack, array
from scikits.statsmodels.datasets import Dataset
from os.path import dirname, abspath
from scikits.statsmodels.tools import categorical
def load():
"""
Load the Fiji WFS data and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = recfromtxt(open(filepath + '/wfs.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,6))
names = ["totchild"] + list(data.dtype.names)
##### SET THE INDEX #####
endog = | array(data[names[4]]*data[names[5]], dtype=float) | numpy.array |
from PIL import Image
from toolz.curried import get
import numpy as np
import torch
from ignite.metrics import Metric
from horch.legacy.train.metrics import Average
class MeanIoU(Metric):
    def __init__(self, num_classes: int, ignore_index=None):
        """Mean-IoU metric accumulated through a confusion matrix.

        Args:
            num_classes: number of segmentation classes; fixes the confusion
                matrix size used by ``reset``/``update``.
            ignore_index: label value to exclude from scoring, or None.
        """
        self.num_classes = num_classes
        self.ignore_index = ignore_index
        # Pass output_transform to the base Metric so raw engine output is
        # converted to a confusion matrix before reaching `update`.
        super().__init__(self.output_transform)
def reset(self):
self.total_cm = np.zeros(self.num_classes, self.num_classes)
    def update(self, output):
        """Accumulate one batch's confusion matrix into the running total.

        Args:
            output: confusion matrix for the batch, as produced by
                ``output_transform``; added element-wise to ``total_cm``.
        """
        cm = output
        self.total_cm += cm
def output_transform(self, output):
y_true, y_pred = get(["y_true", "y_pred"], output)
c = self.num_classes
if isinstance(y_true, Image.Image):
y_true = [ | np.array(img) | numpy.array |
import numpy as np
from abc import ABCMeta, abstractmethod
from scipy import signal
from typing import List
from scipy import signal
from scipy.spatial.transform import Rotation as R
class Target(metaclass=ABCMeta):
    """Abstract interface for a target trajectory.

    Fix: the class previously *inherited from* ``ABCMeta``, which made
    ``Target`` (and every subclass such as ``Circle``) a metaclass — concrete
    subclasses could not be instantiated with keyword arguments, and
    ``@abstractmethod`` was never enforced. ``ABCMeta`` must be supplied as
    the metaclass instead.
    """

    def __init__(self):
        pass

    @abstractmethod
    def find_projection_along_path(self, current_point):
        """Project ``current_point`` onto the target path (subclass-defined)."""

    @abstractmethod
    def spin(self, timestep, n1, n2, dims):
        """Generate target points for steps ``n1``..``n2`` (subclass-defined)."""
class Circle(Target):
def __init__(self, wavelength = 500, amplitude = 0.025, \
center = [0.0, 0.0, 0.0]):
self.wavelength = wavelength
self.amplitude = amplitude
self.center = center
    def find_projection_along_path(self, current_point):
        """Return an angular offset (radians) wrapped into [0, 2*pi).

        NOTE(review): ``ang1`` and ``ang2`` are computed from exactly the same
        values, so this always returns 0.0, and ``current_point`` is unused.
        One of the two arctan2 calls presumably should use ``current_point``
        -- confirm the intended projection against callers before relying on
        this value.
        """
        ang1 = np.arctan2(self.center[0], self.center[2])
        ang2 = np.arctan2(self.center[0], self.center[2])
        return (ang1 - ang2) % (2. * np.pi)
def spin(self, timestep, n1, n2, dims, current_point):
target = | np.empty([n2 - n1, dims]) | numpy.empty |
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
# Lithium_Ion_LiNiMnCoO2_18650.py
#
# Created: Feb 2020, <NAME>
# Modified: Sep 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units , Data
from .Lithium_Ion import Lithium_Ion
from SUAVE.Methods.Power.Battery.Cell_Cycle_Models.LiNiMnCoO2_cell_cycle_model import compute_NMC_cell_state_variables
from SUAVE.Methods.Power.Battery.compute_net_generated_battery_heat import compute_net_generated_battery_heat
import numpy as np
import os
from scipy.integrate import cumtrapz
from scipy.interpolate import RegularGridInterpolator
## @ingroup Components-Energy-Storages-Batteries-Constant_Mass
class Lithium_Ion_LiNiMnCoO2_18650(Lithium_Ion):
""" Specifies discharge/specific energy characteristics specific
18650 lithium-nickel-manganese-cobalt-oxide battery cells
Assumptions:
Convective Thermal Conductivity Coefficient corresponds to forced
air cooling in 35 m/s air
Source:
Automotive Industrial Systems Company of Panasonic Group, Technical Information of
NCR18650G, URL https://www.imrbatteries.com/content/panasonic_ncr18650g.pdf
convective heat transfer coefficient, h
Jeon, Dong Hyup, and Seung Man Baek. "Thermal modeling of cylindrical
lithium ion battery during discharge cycle." Energy Conversion and Management
52.8-9 (2011): 2973-2981.
thermal conductivity, k
Yang, Shuting, et al. "A Review of Lithium-Ion Battery Thermal Management
System Strategies and the Evaluate Criteria." Int. J. Electrochem. Sci 14
(2019): 6077-6107.
specific heat capacity, Cp
(axial and radial)
<NAME>, et al. "A Review of Lithium-Ion Battery Thermal Management
System Strategies and the Evaluate Criteria." Int. J. Electrochem. Sci 14
(2019): 6077-6107.
# Electrode Area
Muenzel, Valentin, et al. "A comparative testing study of commercial
18650-format lithium-ion battery cells." Journal of The Electrochemical
Society 162.8 (2015): A1592.
Inputs:
None
Outputs:
None
Properties Used:
N/A
"""
    def __defaults__(self):
        """Sets the default cell geometry, mass/thermal properties, electrical
        ratings, and the experimental discharge performance map for the
        NCR18650 NMC cell.

        Assumptions:
            None

        Source:
            See class docstring for data sources.
        """
        self.tag                              = 'Lithium_Ion_LiNiMnCoO2_Cell'
        self.cell.diameter                    = 0.0185                                                   # [m]
        self.cell.height                      = 0.0653                                                   # [m]
        self.cell.mass                        = 0.048 * Units.kg                                         # [kg]
        # lateral surface of the cylinder plus both end caps
        self.cell.surface_area                = (np.pi*self.cell.height*self.cell.diameter) + (0.5*np.pi*self.cell.diameter**2)  # [m^2]
        self.cell.volume                      = np.pi*(0.5*self.cell.diameter)**2*self.cell.height
        self.cell.density                     = self.cell.mass/self.cell.volume                          # [kg/m^3]
        self.cell.electrode_area              = 0.0342                                                   # [m^2]
        self.cell.max_voltage                 = 4.2                                                      # [V]
        self.cell.nominal_capacity            = 3.55                                                     # [Amp-Hrs]
        self.cell.nominal_voltage             = 3.6                                                      # [V]
        self.cell.charging_voltage            = self.cell.nominal_voltage                                # [V]
        self.watt_hour_rating                 = self.cell.nominal_capacity  * self.cell.nominal_voltage  # [Watt-hours]
        self.specific_energy                  = self.watt_hour_rating*Units.Wh/self.cell.mass            # [J/kg]
        self.specific_power                   = self.specific_energy/self.cell.nominal_capacity          # [W/kg]
        self.resistance                       = 0.025                                                    # [Ohms]
        self.specific_heat_capacity           = 1108                                                     # [J/kgK]
        self.cell.specific_heat_capacity      = 1108                                                     # [J/kgK]
        # NOTE(review): thermal conductivity is conventionally W/(m K); the
        # original [J/kgK] annotations below look like specific-heat units.
        self.cell.radial_thermal_conductivity = 0.4                                                      # [J/kgK]
        self.cell.axial_thermal_conductivity  = 32.2                                                     # [J/kgK] # estimated
        # Experimental discharge data -> interpolated performance map used by energy_calc
        battery_raw_data                      = load_battery_results()
        self.discharge_performance_map        = create_discharge_performance_map(battery_raw_data)
        return
def energy_calc(self,numerics,battery_discharge_flag = True ):
'''This is an electric cycle model for 18650 lithium-nickel-manganese-cobalt-oxide
battery cells. The model uses experimental data performed
by the Automotive Industrial Systems Company of Panasonic Group
Sources:
Internal Resistance Model:
<NAME>., <NAME>., <NAME>., and <NAME>., "Combined State of Charge and State of
Health estimation over lithium-ion battery cellcycle lifespan for electric
vehicles,"Journal of Power Sources, Vol. 273, 2015, pp. 793-803.
doi:10.1016/j.jpowsour.2014.09.146,URLhttp://dx.doi.org/10.1016/j.jpowsour.2014.09.146.
Battery Heat Generation Model and Entropy Model:
Jeon, <NAME>, and <NAME>. "Thermal modeling of cylindrical lithium ion
battery during discharge cycle." Energy Conversion and Management 52.8-9 (2011):
2973-2981.
Assumtions:
1) All battery modules exhibit the same themal behaviour.
Inputs:
battery.
I_bat (max_energy) [Joules]
cell_mass (battery cell mass) [kilograms]
Cp (battery cell specific heat capacity) [J/(K kg)]
t (battery age in days) [days]
T_ambient (ambient temperature) [Kelvin]
T_current (pack temperature) [Kelvin]
T_cell (battery cell temperature) [Kelvin]
E_max (max energy) [Joules]
E_current (current energy) [Joules]
Q_prior (charge throughput) [Amp-hrs]
R_growth_factor (internal resistance growth factor) [unitless]
inputs.
I_bat (current) [amps]
P_bat (power) [Watts]
Outputs:
battery.
current_energy [Joules]
cell_temperature [Kelvin]
resistive_losses [Watts]
load_power [Watts]
current [Amps]
battery_voltage_open_circuit [Volts]
cell_charge_throughput [Amp-hrs]
internal_resistance [Ohms]
battery_state_of_charge [unitless]
depth_of_discharge [unitless]
battery_voltage_under_load [Volts]
'''
# Unpack varibles
battery = self
I_bat = battery.inputs.current
P_bat = battery.inputs.power_in
electrode_area = battery.cell.electrode_area
As_cell = battery.cell.surface_area
T_current = battery.pack_temperature
T_cell = battery.cell_temperature
E_max = battery.max_energy
E_current = battery.current_energy
Q_prior = battery.cell_charge_throughput
battery_data = battery.discharge_performance_map
I = numerics.time.integrate
D = numerics.time.differentiate
# ---------------------------------------------------------------------------------
# Compute battery electrical properties
# ---------------------------------------------------------------------------------
# Calculate the current going into one cell
n_series = battery.pack_config.series
n_parallel = battery.pack_config.parallel
n_total = battery.pack_config.total
Nn = battery.module_config.normal_count
Np = battery.module_config.parallel_count
n_total_module = Nn*Np
if battery_discharge_flag:
I_cell = I_bat/n_parallel
else:
I_cell = -I_bat/n_parallel
# State of charge of the battery
initial_discharge_state = np.dot(I,P_bat) + E_current[0]
SOC_old = np.divide(initial_discharge_state,E_max)
# Make sure things do not break by limiting current, temperature and current
SOC_old[SOC_old < 0.] = 0.
SOC_old[SOC_old > 1.] = 1.
T_cell[T_cell<272.65] = 272.65
T_cell[T_cell>322.65] = 322.65
battery.cell_temperature = T_cell
battery.pack_temperature = T_cell
# ---------------------------------------------------------------------------------
# Compute battery cell temperature
# ---------------------------------------------------------------------------------
# Determine temperature increase
sigma = 139 # Electrical conductivity
n = 1
F = 96485 # C/mol Faraday constant
delta_S = -496.66*(SOC_old)**6 + 1729.4*(SOC_old)**5 + -2278 *(SOC_old)**4 + 1382.2 *(SOC_old)**3 + \
-380.47*(SOC_old)**2 + 46.508*(SOC_old) + -10.692
i_cell = I_cell/electrode_area # current intensity
q_dot_entropy = -(T_cell)*delta_S*i_cell/(n*F)
q_dot_joule = (i_cell**2)/sigma
Q_heat_gen = (q_dot_joule + q_dot_entropy)*As_cell
q_joule_frac = q_dot_joule/(q_dot_joule + q_dot_entropy)
q_entropy_frac = q_dot_entropy/(q_dot_joule + q_dot_entropy)
# Compute cell temperature
T_current = compute_net_generated_battery_heat(n_total,battery,Q_heat_gen,numerics)
# Power going into the battery accounting for resistance losses
P_loss = n_total*Q_heat_gen
P = P_bat - np.abs(P_loss)
# Compute State Variables
V_ul = compute_NMC_cell_state_variables(battery_data,SOC_old,T_cell,I_cell)
# Li-ion battery interal resistance
R_0 = 0.01483*(SOC_old**2) - 0.02518*SOC_old + 0.1036
# Voltage under load:
V_oc = V_ul + (I_cell * R_0)
# ---------------------------------------------------------------------------------
# Compute updates state of battery
# ---------------------------------------------------------------------------------
# Possible Energy going into the battery:
energy_unmodified = np.dot(I,P)
# Available capacity
capacity_available = E_max - battery.current_energy[0]
# How much energy the battery could be overcharged by
delta = energy_unmodified -capacity_available
delta[delta<0.] = 0.
# Power that shouldn't go in
ddelta = np.dot(D,delta)
# Power actually going into the battery
P[P>0.] = P[P>0.] - ddelta[P>0.]
E_bat = np.dot(I,P)
E_bat = np.reshape(E_bat, | np.shape(E_current) | numpy.shape |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 3 17:27:46 2018
@author: <NAME>
Implementation of information representation based multi-layer classifier using GFMM
Note: Currently, all samples in the dataset must be normalized to the range of [0, 1] before using this class
"""
import sys, os
sys.path.insert(0, os.path.pardir)
import numpy as np
import math
import ast
import time
import multiprocessing
from functionhelper.bunchdatatype import Bunch
from functionhelper.membershipcalc import memberG, asym_similarity_one_many
from functionhelper.preprocessinghelper import read_file_in_chunks_group_by_label, read_file_in_chunks, string_to_boolean, loadDataset
from functionhelper.hyperboxadjustment import isOverlap, hyperboxOverlapTest, modifiedIsOverlap, hyperboxContraction
from concurrent.futures import ProcessPoolExecutor, as_completed
def get_num_cpu_cores():
    """Return the number of worker processes to use.

    On machines with at least four logical cores, two cores are reserved
    (for the OS / main process); otherwise every available core is used.
    """
    available = multiprocessing.cpu_count()
    return available - 2 if available >= 4 else available
class Info_Presentation_Multi_Layer_Classifier_GFMM(object):
def __init__(self, teta = [0.1, 0.5], gamma = 1, simil_thres = 0.5, oper = 'min'):
self.gamma = gamma
self.teta_onl = teta[0]
self.higher_teta = teta[1:]
self.oper = oper
self.simil_thres = simil_thres
    def homogeneous_hyperbox_expansion(self, X_l, X_u, patClassId, current_hyperboxes):
        """
        Expand the current hyperboxes to cover the input patterns. All input samples
        carry the same class label as each other and as the current hyperboxes (if any).
        Also updates the pattern counts and sample centroids of the hyperboxes.

            INPUT
                X_l                   Input data lower bounds (rows = objects, columns = features)
                X_u                   Input data upper bounds (rows = objects, columns = features)
                patClassId            Input data class labels (crisp). patClassId[i] = 0 is an unlabeled item
                current_hyperboxes    Current hyperboxes as a Bunch (properties: lower, upper, classId, no_pat, centroid)

            OUTPUT
                A Bunch with the expanded lower/upper bounds, class labels, pattern
                counts and centroids of the hyperboxes.

        Note: rows of ``current_hyperboxes.lower``/``.upper`` are adjusted in place
        when a box is expanded (V/W alias those arrays).
        """
        yX = X_l.shape[0]
        V = current_hyperboxes.lower
        W = current_hyperboxes.upper
        classId = current_hyperboxes.classId
        no_Pats = current_hyperboxes.no_pat
        centroid = current_hyperboxes.centroid
        # for each input sample
        for i in range(yX):
            classOfX = patClassId[i]
            if V.size == 0:   # no model provided - starting from scratch
                V = np.array([X_l[i]])
                W = np.array([X_u[i]])
                classId = np.array([patClassId[i]])
                no_Pats = np.array([1])
                centroid = np.array([(X_l[i] + X_u[i]) / 2])
            else:
                # memberships of the sample w.r.t. all boxes, highest first
                b = memberG(X_l[i], X_u[i], V, W, self.gamma, self.oper)
                index = np.argsort(b)[::-1]
                bSort = b[index];
                # membership 1 means the sample is already fully inside a box:
                # in that case nothing is changed (not even counts/centroids)
                if bSort[0] != 1:
                    adjust = False
                    for j in index:
                        # test violation of max hyperbox size and class labels
                        if ((np.maximum(W[j], X_u[i]) - np.minimum(V[j], X_l[i])) <= self.teta_onl).all() == True:
                            # adjust the j-th hyperbox
                            V[j] = np.minimum(V[j], X_l[i])
                            W[j] = np.maximum(W[j], X_u[i])
                            no_Pats[j] = no_Pats[j] + 1
                            # incremental (running) mean of the absorbed samples
                            centroid[j] = centroid[j] + (((X_l[i] + X_u[i]) / 2) - centroid[j]) / no_Pats[j]
                            adjust = True
                            if classOfX != 0 and classId[j] == 0:
                                classId[j] = classOfX
                            break
                    # if i-th sample did not fit into any existing box, create a new one
                    if not adjust:
                        V = np.concatenate((V, X_l[i].reshape(1, -1)), axis = 0)
                        W = np.concatenate((W, X_u[i].reshape(1, -1)), axis = 0)
                        classId = np.concatenate((classId, [classOfX]))
                        no_Pats = np.concatenate((no_Pats, [1]))
                        new_Central_Sample = (X_l[i] + X_u[i]) / 2
                        centroid = np.concatenate((centroid, new_Central_Sample.reshape(1, -1)), axis = 0)
        return Bunch(lower=V, upper=W, classId=classId, no_pat=no_Pats, centroid=centroid)
def heterogeneous_hyperbox_expansion(self, X_l, X_u, patClassId, current_hyperboxes):
"""
Expand current hyperboxes to cover input patterns, input samples contains different labels
Update the number of patterns contained in the hyperboxes and their centroids of samples
INPUT
Xl Input data lower bounds (rows = objects, columns = features)
Xu Input data upper bounds (rows = objects, columns = features)
patClassId Input data class labels (crisp). patClassId[i] = 0 corresponds to an unlabeled item
current_hyperboxes A list of current hyperboxes in the Bunch datatype (properties: lower, upper, classId, no_pat, centroid)
OUTPUT
result A bunch data size with lower and upper bounds, class labels of hyperboxes
"""
yX = X_l.shape[0]
V = current_hyperboxes.lower
W = current_hyperboxes.upper
classId = current_hyperboxes.classId
no_Pats = current_hyperboxes.no_pat
centroid = current_hyperboxes.centroid
# for each input sample
for i in range(yX):
classOfX = patClassId[i]
if V.size == 0: # no model provided - starting from scratch
V = np.array([X_l[0]])
W = np.array([X_u[0]])
classId = np.array([patClassId[0]])
no_Pats = np.array([1])
centroid = np.array([(X_l[0] + X_u[0]) / 2])
else:
id_lb_sameX = np.logical_or(classId == classOfX, classId == 0)
if id_lb_sameX.any() == True:
V_sameX = V[id_lb_sameX]
W_sameX = W[id_lb_sameX]
lb_sameX = classId[id_lb_sameX]
id_range = np.arange(len(classId))
id_processing = id_range[id_lb_sameX]
b = memberG(X_l[i], X_u[i], V_sameX, W_sameX, self.gamma, self.oper)
index = np.argsort(b)[::-1]
bSort = b[index]
if bSort[0] != 1 or (classOfX != lb_sameX[index[0]] and classOfX != 0):
adjust = False
for j in id_processing[index]:
# test violation of max hyperbox size and class labels
if (classOfX == classId[j] or classId[j] == 0 or classOfX == 0) and ((np.maximum(W[j], X_u[i]) - np.minimum(V[j], X_l[i])) <= self.teta_onl).all() == True:
# adjust the j-th hyperbox
V[j] = np.minimum(V[j], X_l[i])
W[j] = np.maximum(W[j], X_u[i])
no_Pats[j] = no_Pats[j] + 1
centroid[j] = centroid[j] + (((X_l[i] + X_u[i]) / 2) - centroid[j]) / no_Pats[j]
adjust = True
if classOfX != 0 and classId[j] == 0:
classId[j] = classOfX
break
# if i-th sample did not fit into any existing box, create a new one
if not adjust:
V = np.concatenate((V, X_l[i].reshape(1, -1)), axis = 0)
W = np.concatenate((W, X_u[i].reshape(1, -1)), axis = 0)
classId = np.concatenate((classId, [classOfX]))
no_Pats = np.concatenate((no_Pats, [1]))
new_Central_Sample = (X_l[i] + X_u[i]) / 2
centroid = np.concatenate((centroid, new_Central_Sample.reshape(1, -1)), axis = 0)
else:
# new class lable => create new pattern
V = np.concatenate((V, X_l[i].reshape(1, -1)), axis = 0)
W = np.concatenate((W, X_u[i].reshape(1, -1)), axis = 0)
classId = np.concatenate((classId, [classOfX]))
no_Pats = np.concatenate((no_Pats, [1]))
new_Central_Sample = (X_l[i] + X_u[i]) / 2
centroid = np.concatenate((centroid, new_Central_Sample.reshape(1, -1)), axis = 0)
return Bunch(lower=V, upper=W, classId=classId, no_pat=no_Pats, centroid=centroid)
def homogeneous_worker_distribution_chunk_by_class(self, chunk_data, dic_current_hyperboxes, nprocs):
"""
Distribute data in the current chunk to each worker according to class labels in turn
INPUT
chunk_data a dictionary contains input data with key being label and value being respective bunch data (properties: data, label)
dic_current_hyperboxes a dictionary contains current coordinates of hyperboxes with labels as keys and values being a list of nprocs bunches of hyperboxes
nprocs number of processes needs to be generated
OUTPUT
dic_results a dictionary contains new coordinates of hyperboxes with labels as keys and values being a list of nprocs bunches of hyperboxe
"""
dic_results = dic_current_hyperboxes
with ProcessPoolExecutor(max_workers=nprocs) as executor:
for key in chunk_data:
futures = []
# get list of current hyperboxes or initialize empty list if not exist list or input key
if len(dic_current_hyperboxes) > 0 and (key in dic_current_hyperboxes):
boxes = dic_current_hyperboxes[key]
else:
boxes = np.empty(nprocs, dtype=Bunch)
for j in range(nprocs):
boxes[j] = Bunch(lower=np.array([]), upper=np.array([]), classId=np.array([]), no_pat=0, centroid=np.array([]))
values = chunk_data[key]
num_samples = len(values.data)
if num_samples >= nprocs:
chunksize = int(math.ceil(num_samples / float(nprocs)))
for i in range(nprocs):
X_l = values.data[(chunksize * i) : (chunksize * (i + 1))]
X_u = values.data[(chunksize * i) : (chunksize * (i + 1))]
patClassId = values.label[(chunksize * i) : (chunksize * (i + 1))]
futures.append(executor.submit(self.homogeneous_hyperbox_expansion, X_l, X_u, patClassId, boxes[i]))
else:
futures.append(executor.submit(self.homogeneous_hyperbox_expansion, values, boxes[0]))
# Instruct workers to process results as they come, when all are completed
as_completed(futures) # wait all workers completed
lst_current_boxes = []
for future in futures:
lst_current_boxes.append(future.result())
dic_results[key] = lst_current_boxes
return dic_results
def heterogeneous_worker_distribution_chunk(self, lst_chunk_data, lst_current_hyperboxes, nprocs):
"""
Distribute data in the current chunk to each worker according to the order of patterns
INPUT
lst_chunk_data a list contains input data with key being label and value being respective bunch data (properties: data, label)
lst_current_hyperboxes a list contains current coordinates of hyperboxes (the number of hyperboxes is respective to the number of init cores)
nprocs number of processes needs to be generated
OUTPUT
lst_result a list of newly generated coordinates of hyperboxes
"""
lst_results = []
futures = []
if len(lst_current_hyperboxes) == 0:
lst_current_hyperboxes = np.empty(nprocs, dtype=Bunch)
for j in range(nprocs):
lst_current_hyperboxes[j] = Bunch(lower=np.array([]), upper=np.array([]), classId=np.array([]), no_pat=0, centroid=np.array([]))
with ProcessPoolExecutor(max_workers=nprocs) as executor:
chunksize = int(math.ceil(len(lst_chunk_data.label) / float(nprocs)))
for i in range(nprocs):
X_l = lst_chunk_data.data[(chunksize * i) : (chunksize * (i + 1))]
X_u = lst_chunk_data.data[(chunksize * i) : (chunksize * (i + 1))]
patClassId = lst_chunk_data.label[(chunksize * i) : (chunksize * (i + 1))]
futures.append(executor.submit(self.heterogeneous_hyperbox_expansion, X_l, X_u, patClassId, lst_current_hyperboxes[i]))
# Instruct workers to process results as they come, when all are completed
as_completed(futures) # wait all workers completed:
for future in futures:
lst_results.append(future.result())
return lst_results
def removeContainedHyperboxes_UpdateCentroid(self):
"""
Remove all hyperboxes contained in other hyperboxes with the same class label and update centroids of larger hyperboxes
This operation is performed on the values of lower and upper bounds, labels, and instance variables
"""
numBoxes = len(self.classId)
indtokeep = np.ones(numBoxes, dtype=np.bool) # position of all hyperboxes kept
no_removed_boxes = 0
for i in range(numBoxes):
# Filter hypeboxes with the sample label as hyperbox i
id_hyperbox_same_label = self.classId == self.classId[i]
id_hyperbox_same_label[i] = False # remove hyperbox i
if id_hyperbox_same_label.any() == True:
# exist at least one hyperbox with the same label as hyperbox i
V_same = self.V[id_hyperbox_same_label]
W_same = self.W[id_hyperbox_same_label]
memValue = memberG(self.V[i], self.W[i], V_same, W_same, self.gamma, self.oper)
equal_one_index = memValue == 1
if np.sum(equal_one_index) > 0:
original_index = np.arange(0, numBoxes)
original_index_same_label = original_index[id_hyperbox_same_label]
index_Parent_Hyperbox = original_index_same_label[np.nonzero(equal_one_index)[0]] # Find indices of hyperboxes that contain hyperbox i
isIncluded = len(index_Parent_Hyperbox) > 0
if isIncluded == True:
indtokeep[i] = False
no_removed_boxes = no_removed_boxes + 1
# Update centroid of larger hyperbox
if len(index_Parent_Hyperbox) == 1:
parent_selection = index_Parent_Hyperbox[0]
elif len(index_Parent_Hyperbox) > 1:
# Compute the distance from the centroid of hyperbox i to centroids of other hyperboxes and choose the hyperbox with the smallest distance to merge
min_dis = np.linalg.norm(self.centroid[i] - self.centroid[index_Parent_Hyperbox[0]])
parent_selection = index_Parent_Hyperbox[0]
for jj in range(1, len(index_Parent_Hyperbox)):
dist = np.linalg.norm(self.centroid[i] - self.centroid[index_Parent_Hyperbox[jj]])
if min_dis < dist:
min_dis = dist
parent_selection = index_Parent_Hyperbox[jj]
# Merge centroids and number of hyperboxes
self.centroid[parent_selection] = (self.no_pat[parent_selection] * self.centroid[parent_selection] + self.no_pat[i] * self.centroid[i]) / (self.no_pat[i] + self.no_pat[parent_selection])
self.no_pat[parent_selection] = self.no_pat[parent_selection] + self.no_pat[i]
# remove hyperboxes contained in other hyperboxes
self.V = self.V[indtokeep, :]
self.W = self.W[indtokeep, :]
self.classId = self.classId[indtokeep]
self.centroid = self.centroid[indtokeep]
self.no_pat = self.no_pat[indtokeep]
self.no_contained_boxes = no_removed_boxes
    def predict_val(self, XlT, XuT, patClassIdTest, no_predicted_samples_hyperboxes):
        """
        GFMM classification for validation, using the hyperboxes stored in
        self.V, self.W, self.classId, self.centroid, self.no_pat.

            INPUT
                XlT                               Test data lower bounds (rows = objects, columns = features)
                XuT                               Test data upper bounds (rows = objects, columns = features)
                patClassIdTest                    Test data class labels (crisp); 0 = unlabeled
                no_predicted_samples_hyperboxes   Per-hyperbox counts so far (column 0: right, column 1: wrong)

            OUTPUT
                The updated per-hyperbox counts (column 0: right, column 1: wrong).
        """
        #initialization
        yX = XlT.shape[0]
        mem = np.zeros((yX, self.V.shape[0]))
        # classifications
        for i in range(yX):
            mem[i, :] = memberG(XlT[i, :], XuT[i, :], self.V, self.W, self.gamma, self.oper) # calculate memberships for all hyperboxes
            bmax = mem[i,:].max() # get max membership value
            maxVind = np.nonzero(mem[i,:] == bmax)[0] # get indexes of all hyperboxes with max membership
            if len(maxVind) == 1:
                # Only one hyperbox with the highest membership function
                if self.classId[maxVind[0]] == patClassIdTest[i]:
                    no_predicted_samples_hyperboxes[maxVind[0], 0] = no_predicted_samples_hyperboxes[maxVind[0], 0] + 1
                else:
                    no_predicted_samples_hyperboxes[maxVind[0], 1] = no_predicted_samples_hyperboxes[maxVind[0], 1] + 1
            else:
                # More than one hyperbox with highest membership => break the tie with
                # the hyperbox whose sample centroid is closest to the input's center;
                # on equal distance, prefer the box holding more patterns.
                centroid_input_pat = (XlT[i] + XuT[i]) / 2
                id_min = maxVind[0]
                min_dist = np.linalg.norm(self.centroid[id_min] - centroid_input_pat)
                for j in range(1, len(maxVind)):
                    id_j = maxVind[j]
                    dist_j = np.linalg.norm(self.centroid[id_j] - centroid_input_pat)
                    if dist_j < min_dist or (dist_j == min_dist and self.no_pat[id_j] > self.no_pat[id_min]):
                        id_min = id_j
                        min_dist = dist_j
                # unlabeled test samples (patClassIdTest[i] == 0) count as "right"
                if self.classId[id_min] != patClassIdTest[i] and patClassIdTest[i] != 0:
                    no_predicted_samples_hyperboxes[id_min, 1] = no_predicted_samples_hyperboxes[id_min, 1] + 1
                else:
                    no_predicted_samples_hyperboxes[id_min, 0] = no_predicted_samples_hyperboxes[id_min, 0] + 1
        return no_predicted_samples_hyperboxes
def pruningHandling(self, valFile_Path, chunk_size, isPhase1 = True, accuracy_threshold = 0.5):
"""
Pruning for hyperboxes in the current lists: V, W, classid, centroid
Criteria: The accuracy rate < 0.5
INPUT
chunk_size The size of each reading chunk to be handled
valFile_Path The path to the validation file including filename and its extension
accuracy_threshold The minimum accuracy for each hyperbox
isPhase1 True: Pruning using minPatternsPerBox, otherwise => do not use minPatternsPerBox
"""
# delete hyperboxes containing the number of patterns fewer than minPatternsPerBox
# currenNoHyperbox = len(self.classId)
# index_Kept = np.ones(currenNoHyperbox).astype(bool)
# isExistPrunedBox = False
# if isPhase1 == True:
# for i in range(currenNoHyperbox):
# if self.no_pat[i] < minPatternsPerBox:
# index_Kept[i] = False
# isExistPrunedBox = True
# if isExistPrunedBox == True:
# self.V = self.V[index_Kept]
# self.W = self.W[index_Kept]
# self.classId = self.classId[index_Kept]
# self.centroid = self.centroid[index_Kept]
# self.no_pat = self.no_pat[index_Kept]
# pruning using validation set
currenNoHyperbox = len(self.classId)
if currenNoHyperbox > 0:
# index_Kept = np.ones(currenNoHyperbox).astype(bool) # recompute the marking matrix
chunk_id = 0
# init two lists containing number of patterns classified correctly and incorrectly for each hyperbox
no_predicted_samples_hyperboxes = np.zeros((len(self.classId), 2))
while True:
# handle in chunks
chunk_data = read_file_in_chunks(valFile_Path, chunk_id, chunk_size)
if chunk_data != None:
chunk_id = chunk_id + 1
# carried validation
no_predicted_samples_hyperboxes = self.predict_val(chunk_data.data, chunk_data.data, chunk_data.label, no_predicted_samples_hyperboxes)
else:
break
# pruning handling based on the validation results
tmp_no_box = no_predicted_samples_hyperboxes.shape[0]
accuracy_larger_half = np.zeros(tmp_no_box).astype(np.bool)
for i in range(tmp_no_box):
if (no_predicted_samples_hyperboxes[i, 0] + no_predicted_samples_hyperboxes[i, 1] != 0) and no_predicted_samples_hyperboxes[i, 0] / (no_predicted_samples_hyperboxes[i, 0] + no_predicted_samples_hyperboxes[i, 1]) >= accuracy_threshold:
accuracy_larger_half[i] = True
# Pruning
self.V = self.V[accuracy_larger_half]
self.W = self.W[accuracy_larger_half]
self.classId = self.classId[accuracy_larger_half]
self.centroid = self.centroid[accuracy_larger_half]
self.no_pat = self.no_pat[accuracy_larger_half]
def granular_phase_one_classifier(self, dataFilePath, chunk_size, type_chunk = 1, isPruning = False, valFile_Path = '', accuracyPerBox = 0.5, XlT = None, XuT = None, patClassIdTest = None, file_object_save = None):
"""
This method is to read the dataset in chunks and build base hyperboxes from the input data
INPUT
dataFilePath The path to the training dataset file including file name and its extension
chunk_size The size of each reading chunk to be handled
type_chunk The type of data contained in each chunk:
+ 1: heterogeneous data with different class label
+ otherwise: data are grouped by class labels
isPruning True: apply the pruning process
False: not use the pruning process
valFile_Path The path to the validation file including filename and its extension
accuracyPerBox Minimum accuracy of each hyperbox w.r.t validation set
XlT Test data lower bounds (rows = objects, columns = features)
XuT Test data upper bounds (rows = objects, columns = features)
patClassIdTest Test data class labels (crisp)
"""
chunk_id = 0
nprocs = get_num_cpu_cores() # get number of cores in cpu for handling data
print("No. cores =", nprocs)
if file_object_save != None:
file_object_save.write("No. cores = %d \n" % nprocs)
# Initialize hyperboxes for each core
if type_chunk == 1:
current_hyperboxes = [] # empty list
else:
current_hyperboxes = {} # empty hashtable
time_start = time.perf_counter()
while True:
chunk_data = read_file_in_chunks(dataFilePath, chunk_id, chunk_size) if type_chunk == 1 else read_file_in_chunks_group_by_label(dataFilePath, chunk_id, chunk_size)
if chunk_data != None:
if type_chunk == 1:
current_hyperboxes = self.heterogeneous_worker_distribution_chunk(chunk_data, current_hyperboxes, nprocs)
else:
current_hyperboxes = self.homogeneous_worker_distribution_chunk_by_class(chunk_data, current_hyperboxes, nprocs)
chunk_id = chunk_id + 1
else:
break
# Merge all generated hyperboxes and then remove all hyperboxes insider larger hyperboxes and update their centroids
if type_chunk == 1:
self.V = current_hyperboxes[0].lower
self.W = current_hyperboxes[0].upper
self.classId = current_hyperboxes[0].classId
self.no_pat = current_hyperboxes[0].no_pat
self.centroid = current_hyperboxes[0].centroid
num_Eles = len(current_hyperboxes)
for kk in range(1, num_Eles):
self.V = np.concatenate((self.V, current_hyperboxes[kk].lower), axis=0)
self.W = np.concatenate((self.W, current_hyperboxes[kk].upper), axis=0)
self.classId = np.concatenate((self.classId, current_hyperboxes[kk].classId))
self.no_pat = np.concatenate((self.no_pat, current_hyperboxes[kk].no_pat))
self.centroid = np.concatenate((self.centroid, current_hyperboxes[kk].centroid), axis=0)
else:
self.V = []
for key in current_hyperboxes:
for value in current_hyperboxes[key]:
if len(self.V) == 0:
self.V = value.lower
self.W = value.upper
self.classId = value.classId
self.no_pat = value.no_pat
self.centroid = value.centroid
else:
if len(value.lower) > 0:
self.V = np.concatenate((self.V, value.lower), axis=0)
self.W = np.concatenate((self.W, value.upper), axis=0)
self.classId = np.concatenate((self.classId, value.classId))
self.no_pat = np.concatenate((self.no_pat, value.no_pat))
self.centroid = np.concatenate((self.centroid, value.centroid), axis=0)
# delete hyperboxes contained in other hyperboxes and update the centroids of larger hyperboxes
self.removeContainedHyperboxes_UpdateCentroid()
numBoxes_before_pruning = len(self.classId)
self.phase1_elapsed_training_time = time.perf_counter() - time_start
self.training_time_before_pruning = self.phase1_elapsed_training_time
if (XlT is not None) and (len(self.classId) > 0):
numTestSample = XlT.shape[0]
result_testing = self.predict_test(XlT, XuT, patClassIdTest)
if (result_testing is not None) and file_object_save is not None:
file_object_save.write("Phase 1 before pruning: \n")
file_object_save.write("Number of testing samples = %d \n" % numTestSample)
file_object_save.write("Number of wrong predicted samples = %d \n" % result_testing.summis)
file_object_save.write("Error Rate = %f \n" % (np.round(result_testing.summis / numTestSample * 100, 4)))
file_object_save.write("No. samples use centroid for prediction = %d \n" % result_testing.use_centroid)
file_object_save.write("No. samples use centroid but wrong prediction = %d \n" % result_testing.use_centroid_wrong)
if isPruning:
time_start = time.perf_counter()
self.pruningHandling(valFile_Path, chunk_size, True, accuracyPerBox)
numBoxes_after_pruning = len(self.classId)
self.phase1_elapsed_training_time = self.phase1_elapsed_training_time + (time.perf_counter() - time_start)
if (XlT is not None) and len(self.classId) > 0:
result_testing = self.predict_test(XlT, XuT, patClassIdTest)
if result_testing is not None and file_object_save is not None:
file_object_save.write("Phase 1 after pruning: \n")
file_object_save.write("Number of wrong predicted samples = %d \n" % result_testing.summis)
file_object_save.write("Error Rate = %f \n" % (np.round(result_testing.summis / numTestSample * 100, 4)))
file_object_save.write("No. samples use centroid for prediction = %d \n" % result_testing.use_centroid)
file_object_save.write("No. samples use centroid but wrong prediction = %d \n" % result_testing.use_centroid_wrong)
if file_object_save is not None:
file_object_save.write("No. hyperboxes before pruning: %d \n" % numBoxes_before_pruning)
if isPruning and file_object_save is not None:
file_object_save.write("No. hyperboxes after pruning: %d \n" % numBoxes_after_pruning)
if file_object_save is not None:
file_object_save.write('Phase 1 running time = %f \n' % self.phase1_elapsed_training_time)
file_object_save.write('Running time before pruning = %f \n' % self.training_time_before_pruning)
return self
# def granular_phase_two_classifier(self, isAllowedOverlap = False):
# """
# Phase 2 in the classifier: using agglomerative learning to aggregate smaller hyperboxes with the same class
#
# granular_phase_two_classifier(isAllowedOverlap)
#
# INPUT
# isAllowedOverlap + True: the aggregated hyperboxes are allowed to overlap with hyperboxes represented other classes
# + False: no overlap among hyperboxes allowed
#
# OUTPUT
# V, W, classId, centroid, no_pat are adjusted
# """
# yX, xX = self.V.shape
# time_start = time.perf_counter()
# # training
# isTraining = True
# while isTraining:
# isTraining = False
#
# k = 0 # input pattern index
# while k < len(self.classId):
# idx_same_classes = np.logical_or(self.classId == self.classId[k], self.classId == 0)
# idx_same_classes[k] = False # remove element in the position k
# idex = np.arange(len(self.classId))
# idex = idex[idx_same_classes] # keep the indices of elements retained
# V_same_class = self.V[idx_same_classes]
# W_same_class = self.W[idx_same_classes]
#
# if self.simil_type == 'short':
# b = memberG(self.W[k], self.V[k], V_same_class, W_same_class, self.gamma, self.oper)
# elif self.simil_type == 'long':
# b = memberG(self.V[k], self.W[k], W_same_class, V_same_class, self.gamma, self.oper)
# else:
# b = asym_similarity_one_many(self.V[k], self.W[k], V_same_class, W_same_class, self.gamma, self.oper_asym, self.oper)
#
# indB = np.argsort(b)[::-1]
# idex = idex[indB]
# sortB = b[indB]
#
# maxB = sortB[sortB >= self.simil_thres] # apply membership threshold
#
# if len(maxB) > 0:
# idexmax = idex[sortB >= self.simil_thres]
#
# pairewise_maxb = np.concatenate((np.minimum(k, idexmax)[:, np.newaxis], np.maximum(k,idexmax)[:, np.newaxis], maxB[:, np.newaxis]), axis=1)
#
# for i in range(pairewise_maxb.shape[0]):
# # calculate new coordinates of k-th hyperbox by including pairewise_maxb(i,1)-th box, scrap the latter and leave the rest intact
# # agglomorate pairewise_maxb(i, 0) and pairewise_maxb(i, 1) by adjusting pairewise_maxb(i, 0)
# # remove pairewise_maxb(i, 1) by getting newV from 1 -> pairewise_maxb(i, 0) - 1, new coordinates for pairewise_maxb(i, 0), from pairewise_maxb(i, 0) + 1 -> pairewise_maxb(i, 1) - 1, pairewise_maxb(i, 1) + 1 -> end
# ind_hyperbox_1 = int(pairewise_maxb[i, 0])
# ind_hyperbox_2 = int(pairewise_maxb[i, 1])
# newV = np.concatenate((self.V[:ind_hyperbox_1], np.minimum(self.V[ind_hyperbox_1], self.V[ind_hyperbox_2]).reshape(1, -1), self.V[ind_hyperbox_1 + 1:ind_hyperbox_2], self.V[ind_hyperbox_2 + 1:]), axis=0)
# newW = np.concatenate((self.W[:ind_hyperbox_1], np.maximum(self.W[ind_hyperbox_1], self.W[ind_hyperbox_2]).reshape(1, -1), self.W[ind_hyperbox_1 + 1:ind_hyperbox_2], self.W[ind_hyperbox_2 + 1:]), axis=0)
# newClassId = np.concatenate((self.classId[:ind_hyperbox_2], self.classId[ind_hyperbox_2 + 1:]))
#
## index_remain = np.ones(len(self.classId)).astype(np.bool)
## index_remain[ind_hyperbox_2] = False
## newV = self.V[index_remain]
## newW = self.W[index_remain]
## newClassId = self.classId[index_remain]
## if ind_hyperbox_1 < ind_hyperbox_2:
## tmp_row = ind_hyperbox_1
## else:
## tmp_row = ind_hyperbox_1 - 1
## newV[tmp_row] = np.minimum(self.V[ind_hyperbox_1], self.V[ind_hyperbox_2])
## newW[tmp_row] = np.maximum(self.W[ind_hyperbox_1], self.W[ind_hyperbox_2])
##
# # adjust the hyperbox if no overlap and maximum hyperbox size is not violated
# # position of adjustment is pairewise_maxb[i, 0] in new bounds
# no_overlap = True
# if isAllowedOverlap == False:
# no_overlap = not isOverlap(newV, newW, pairewise_maxb[i, 0].astype(np.int64), newClassId)
#
# if no_overlap and (((newW[pairewise_maxb[i, 0].astype(np.int64)] - newV[pairewise_maxb[i, 0].astype(np.int64)]) <= self.teta_agglo).all() == True):
# self.V = newV
# self.W = newW
# self.classId = newClassId
#
# # merge centroids and tune the number of patterns contained in the newly aggregated hyperbox, delete data of the eliminated hyperbox
# self.centroid[ind_hyperbox_1] = (self.no_pat[ind_hyperbox_1] * self.centroid[ind_hyperbox_1] + self.no_pat[ind_hyperbox_2] * self.centroid[ind_hyperbox_2]) / (self.no_pat[ind_hyperbox_1] + self.no_pat[ind_hyperbox_2])
# # delete centroid of hyperbox ind_hyperbox_2
# self.centroid = np.concatenate((self.centroid[:ind_hyperbox_2], self.centroid[ind_hyperbox_2 + 1:]), axis=0)
#
# self.no_pat[ind_hyperbox_1] = self.no_pat[ind_hyperbox_1] + self.no_pat[ind_hyperbox_2]
# self.no_pat = np.concatenate((self.no_pat[:ind_hyperbox_2], self.no_pat[ind_hyperbox_2 + 1:]))
#
# isTraining = True
#
# if k != pairewise_maxb[i, 0]: # position pairewise_maxb[i, 1] (also k) is removed, so next step should start from pairewise_maxb[i, 1]
# k = k - 1
#
# break # if hyperbox adjusted there's no need to look at other hyperboxes
#
#
# k = k + 1
#
# self.phase2_elapsed_training_time = time.perf_counter() - time_start
# print("No. hyperboxes after phase 2: ", len(self.classId))
# print('Phase 2 running time =', self.phase2_elapsed_training_time)
    def granular_phase_two_classifier(self, XlT, XuT, patClassIdTest, file_object_save=None):
        """
        Phase 2 in the classifier: using modified online learning to aggregate smaller hyperboxes with the same class.

        For each maximum-size threshold in ``self.higher_teta`` the current set of
        hyperboxes is re-presented (one by one) to an online aggregation pass that
        either absorbs a box into a sufficiently similar same-class box or keeps it
        as a new box, then reports accuracy on the test set.

        INPUT
            XlT               Test data lower bounds (rows = objects, columns = features)
            XuT               Test data upper bounds (rows = objects, columns = features)
            patClassIdTest    Test data class labels (crisp)
            file_object_save  The file object to write down the results (or None)

        OUTPUT
            V, W, classId, centroid, no_pat are adjusted
        """
        self.phase2_elapsed_training_time = 0
        # Repeat the aggregation once per (increasing) maximum hyperbox size
        for teta in self.higher_teta:
            V = []
            if len(self.classId) > 1:
                start_t = time.perf_counter()
                for i in range(len(self.classId)):
                    if len(V) == 0:
                        # first box seeds the aggregated model
                        V = np.array([self.V[i]])
                        W = np.array([self.W[i]])
                        classId = np.array([self.classId[i]])
                        no_pat = np.array([self.no_pat[i]])
                        centroid = np.array([self.centroid[i]])
                    else:
                        classOfX = self.classId[i]
                        # candidate hosts: boxes of the same class or unlabelled (class 0)
                        id_lb_sameX = np.logical_or(classId == classOfX, classId == 0)
                        isAddNew = False
                        if id_lb_sameX.any() == True:
                            V_sameX = V[id_lb_sameX]
                            W_sameX = W[id_lb_sameX]
                            lb_sameX = classId[id_lb_sameX]
                            id_range = np.arange(len(classId))
                            id_processing = id_range[id_lb_sameX]
                            # membership of box i w.r.t. every candidate host
                            b = memberG(self.V[i], self.W[i], V_sameX, W_sameX, self.gamma)
                            index = np.argsort(b)[::-1]
                            bSort = b[index]
                            # if the best host already fully contains box i with a
                            # matching class, box i is simply dropped (absorbed)
                            if bSort[0] != 1 or (classOfX != lb_sameX[index[0]] and classOfX != 0):
                                adjust = False
                                maxB = bSort[bSort >= self.simil_thres]  # apply membership threshold
                                if len(maxB) > 0:
                                    indexmax = index[bSort >= self.simil_thres]
                                    for j in id_processing[indexmax]:
                                        # test violation of max hyperbox size and class labels
                                        if (classOfX == classId[j] or classId[j] == 0 or classOfX == 0) and ((np.maximum(W[j], self.W[i]) - np.minimum(V[j], self.V[i])) <= teta).all() == True:
                                            # save old value so the expansion can be rolled back
                                            Vj_old = V[j].copy()
                                            Wj_old = W[j].copy()
                                            classId_old = classId[j]
                                            # tentatively expand the j-th hyperbox to cover box i
                                            V[j] = np.minimum(V[j], self.V[i])
                                            W[j] = np.maximum(W[j], self.W[i])
                                            if classOfX != 0 and classId[j] == 0:
                                                classId[j] = classOfX
                                            # Test overlap with boxes of other classes
                                            if modifiedIsOverlap(V, W, j, classId) == True:  # overlap test
                                                # revert change and choose other hyperbox
                                                V[j] = Vj_old
                                                W[j] = Wj_old
                                                classId[j] = classId_old
                                            else:
                                                # Keep changes, fold in the pattern count and
                                                # weighted centroid, then stop searching hosts
                                                no_pat[j] = no_pat[j] + self.no_pat[i]
                                                centroid[j] = centroid[j] + (self.no_pat[i] / no_pat[j]) * (self.centroid[i] - centroid[j])
                                                adjust = True
                                                break
                                # if box i did not fit into any existing box, create a new one
                                if not adjust:
                                    isAddNew = True
                        else:
                            isAddNew = True
                        if isAddNew == True:
                            # append box i unchanged to the aggregated model
                            V = np.concatenate((V, self.V[i].reshape(1, -1)), axis = 0)
                            W = np.concatenate((W, self.W[i].reshape(1, -1)), axis = 0)
                            classId = np.concatenate((classId, [classOfX]))
                            no_pat = np.concatenate((no_pat, [self.no_pat[i]]))
                            centroid = np.concatenate((centroid, self.centroid[i].reshape(1, -1)), axis = 0)
                            # Test overlap and contract against the new box because
                            # phase 1 may have created overlapping regions
                            indOfWinner = len(classId) - 1
                            for ii in range(V.shape[0]):
                                if ii != indOfWinner and classId[ii] != classId[indOfWinner]:
                                    caseDim = hyperboxOverlapTest(V, W, indOfWinner, ii)  # overlap test
                                    if caseDim.size > 0:
                                        V, W = hyperboxContraction(V, W, caseDim, ii, indOfWinner)
                # commit the aggregated model for this teta
                self.V = V
                self.W = W
                self.classId = classId
                self.no_pat = no_pat
                self.centroid = centroid
                sub_space_time = time.perf_counter() - start_t
                self.phase2_elapsed_training_time = self.phase2_elapsed_training_time + sub_space_time
                if file_object_save is not None:
                    file_object_save.write("=> teta = %f \n" % teta)
                    file_object_save.write("Num hyperboxes = %d \n" % len(self.classId))
                    file_object_save.write("Running time = %f \n" % sub_space_time)
                # Do testing
                result_testing = self.predict_test(XlT, XuT, patClassIdTest)
                if (result_testing is not None) and (file_object_save is not None):
                    numTestSample = XlT.shape[0]
                    file_object_save.write("Number of testing samples = %d \n" % numTestSample)
                    file_object_save.write("Number of wrong predicted samples = %d \n" % result_testing.summis)
                    file_object_save.write("Error Rate = %f \n" % (np.round(result_testing.summis / numTestSample * 100, 4)))
                    file_object_save.write("No. samples use centroid for prediction = %d \n" % result_testing.use_centroid)
                    file_object_save.write("No. samples use centroid but wrong prediction = %d \n" % result_testing.use_centroid_wrong)
        if file_object_save is not None:
            file_object_save.write("Phase 2 training time = %f \n" % self.phase2_elapsed_training_time)
def predict(self, Xl, Xu):
"""
Predict the class of the input layer
result = predict_test(XlT,XuT)
INPUT
Xl Input data lower bounds (rows = objects, columns = features)
Xu Input data upper bounds (rows = objects, columns = features)
OUTPUT
result A list of predicted results for input samples
"""
#initialization
yX = Xl.shape[0]
result = np.empty(yX)
mem = np.zeros((yX, self.V.shape[0]))
# classifications
for i in range(yX):
mem[i, :] = memberG(Xl[i, :], Xu[i, :], self.V, self.W, self.gamma, self.oper) # calculate memberships for all hyperboxes
bmax = mem[i,:].max() # get max membership value
maxVind = np.nonzero(mem[i,:] == bmax)[0] # get indexes of all hyperboxes with max membership
if len(maxVind) == 1:
# only one hyperbox with the highest membership value
result[i] = self.classId[maxVind[0]]
else:
# More than one hyperbox with the highest membership value => compare with centroid
same_first_el_class = maxVind[self.classId[maxVind] == self.classId[maxVind[0]]]
if len(maxVind) == len(same_first_el_class):
# all membership in maxVind have the same class
result[i] = self.classId[maxVind[0]]
else:
# at least one pair of hyperboxes with different class => compare the centroid, and classify the input to the hyperboxes with nearest distance to the input pattern
centroid_input_pat = (Xl[i] + Xu[i]) / 2
id_min = maxVind[0]
min_dist = | np.linalg.norm(self.centroid[id_min] - centroid_input_pat) | numpy.linalg.norm |
# This script is used to calculate some statistics for the Green View Results
# Copyright(C) <NAME>, <NAME>, <NAME>, Senseable City Lab, MIT
def Read_GSVinfo_Text(GVI_Res_txt):
    '''
    Read one GSV green-view-index result text file.

    Duplicate panorama sites are removed and only records with a valid,
    positive green view value are kept.

    Return:
        panoIDLst,panoDateLst,panoLonLst,panoLatLst,greenViewLst
    Pamameters:
        GVI_Res_txt: the file name of the GSV information txt file
    '''
    # empty lists to save the GVI result and GSV metadata
    panoIDLst = []
    panoDateLst = []
    panoLonLst = []
    panoLatLst = []
    greenViewLst = []
    # "with" guarantees the file handle is closed even if parsing fails
    # (the original leaked the handle returned by open())
    with open(GVI_Res_txt, "r") as lines:
        for line in lines:
            # check the completeness of each line; each valid line includes
            # panoDate, lon, lat and greenview attributes
            if "panoDate" not in line or "greenview" not in line:
                continue
            # the pano id is the fixed-width 22-char token before " panoDate"
            panoID = line.split(" panoDate")[0][-22:]
            # the pano date is the 7-char "YYYY-MM" token before " longitude"
            panoDate = line.split(" longitude")[0][-7:]
            coordinate = line.split("longitude: ")[1]
            lon = coordinate.split(" latitude: ")[0]
            latView = coordinate.split(" latitude: ")[1]
            lat = latView.split(', greenview:')[0]
            greenView = line.split("greenview:")[1]
            # skip empty or non-positive green view values
            if len(greenView) < 2:
                continue
            elif float(greenView) <= 0:
                continue
            # remove the duplicated panorama id
            if panoID not in panoIDLst:
                panoIDLst.append(panoID)
                panoDateLst.append(panoDate)
                panoLonLst.append(lon)
                panoLatLst.append(lat)
                greenViewLst.append(greenView)
    return panoIDLst, panoDateLst, panoLonLst, panoLatLst, greenViewLst
# read the green view index files into list, the input can be file or folder
def Read_GVI_res(GVI_Res):
    '''
    Read GSV green-view-index information from a txt file or a folder of txt
    files. Duplicate sites are removed by the per-file reader.

    Return:
        panoIDLst,panoDateLst,panoLonLst,panoLatLst,greenViewLst
    Pamameters:
        GVI_Res: the file name of the GSV information text, could be folder or txt file
    last modified by <NAME>, March 27, 2018
    '''
    import os, os.path
    # empty lists to save the GVI result and GSV metadata
    panoIDLst = []
    panoDateLst = []
    panoLonLst = []
    panoLatLst = []
    greenViewLst = []
    # if the input gvi result is a folder, aggregate over every txt file in it
    if os.path.isdir(GVI_Res):
        allTxtFiles = os.listdir(GVI_Res)
        for txtfile in allTxtFiles:
            # only read the text files
            if not txtfile.endswith('.txt'):
                continue
            txtfilename = os.path.join(GVI_Res, txtfile)
            # call the function to read one txt file into lists
            [panoIDLst_tem, panoDateLst_tem, panoLonLst_tem, panoLatLst_tem, greenViewLst_tem] = Read_GSVinfo_Text(txtfilename)
            panoIDLst = panoIDLst + panoIDLst_tem
            panoDateLst = panoDateLst + panoDateLst_tem
            panoLonLst = panoLonLst + panoLonLst_tem
            panoLatLst = panoLatLst + panoLatLst_tem
            greenViewLst = greenViewLst + greenViewLst_tem
    else:  # for a single txt file
        # BUGFIX: the original passed the undefined name `txtfilename` here
        # (NameError) and never merged the parsed lists into the return values
        [panoIDLst, panoDateLst, panoLonLst, panoLatLst, greenViewLst] = Read_GSVinfo_Text(GVI_Res)
    return panoIDLst, panoDateLst, panoLonLst, panoLatLst, greenViewLst
## ----------------- Main function ------------------------
if __name__ == "__main__":
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
inputGVIres = r'C:\\Users\\rangu_uhpmatw\\Documents\\GitHub\\customs\\Treepedia_Public\\SA\\SA_GVR'
outputShapefile = 'C:\\Users\\rangu_uhpmatw\\Documents\\GitHub\\customs\\Treepedia_Public\\SA\\SA_GVR.shp'
lyrname = 'greenView'
[_,_,_,_,SA] = Read_GVI_res(inputGVIres)
print ('The length of the SA is:', len(SA))
SA = np.array(SA).astype(np.float)
print(np.average(SA))
print(np.percentile(SA, 0))
print(np.percentile(SA, 25))
print( | np.percentile(SA, 50) | numpy.percentile |
"""Operations for dannce."""
import numpy as np
import cv2
import time
from typing import Text
import torch
import torch.nn.functional as F
class Camera:
    """A pinhole camera described by intrinsics K and extrinsics (R, t).

    Matrices follow the Matlab convention: the projection matrix is
    M = [R; t] @ K and points are treated as row vectors.
    """

    def __init__(self, R, t, K, tdist, rdist, name=""):
        # Defensive copies so later crop/resize updates cannot mutate the
        # caller's arrays.
        self.R = np.array(R).copy()
        self.t = np.array(t).copy()
        self.K = np.array(K).copy()
        assert self.R.shape == (3, 3)
        assert self.t.shape == (1, 3)
        assert self.K.shape == (3, 3)
        # 4x3 projection matrix in the Matlab convention.
        self.M = np.concatenate((R, t), axis=0) @ self.K
        self.tdist = tdist
        self.rdist = rdist
        self.name = name

    def update_after_crop(self, bbox):
        """Shift the principal point after cropping to (left, upper, right, lower)."""
        left, upper, _right, _lower = bbox
        # K is stored transposed (Matlab convention): the principal point
        # occupies row 2.
        self.K[2, 0] = self.K[2, 0] - left
        self.K[2, 1] = self.K[2, 1] - upper

    def update_after_resize(self, image_shape, new_image_shape):
        """Rescale focal lengths and principal point after an image resize."""
        old_height, old_width = image_shape
        new_height, new_width = new_image_shape
        scale_x = new_width / old_width
        scale_y = new_height / old_height
        self.K[0, 0] = self.K[0, 0] * scale_x
        self.K[1, 1] = self.K[1, 1] * scale_y
        self.K[2, 0] = self.K[2, 0] * scale_x
        self.K[2, 1] = self.K[2, 1] * scale_y

    @property
    def camera_matrix(self):
        """Full projection matrix [R; t] @ K, recomputed from the current K."""
        return self.extrinsics.dot(self.K)

    @property
    def extrinsics(self):
        """4x3 stacked extrinsics [R; t]."""
        return np.concatenate((self.R, self.t), axis=0)
def camera_matrix(K: np.ndarray, R: np.ndarray, t: np.ndarray) -> np.ndarray:
    """Derive the camera matrix.

    Combines the intrinsic matrix K with the extrinsic rotation R and
    translation t using the Matlab convention:
        M = [R; t] * K
    """
    extrinsics = np.concatenate((R, t), axis=0)
    return extrinsics @ K
def world_to_cam(pts, M, device):
    """Apply the 4x? camera matrix M to 3-D points in homogeneous form.

    Points are row vectors; a column of ones is appended before the
    multiplication. M is moved to *device* first.
    """
    ones = torch.ones(pts.shape[0], 1, dtype=torch.float32, device=device)
    homogeneous = torch.cat((pts, ones), 1)
    return homogeneous @ M.to(device=device)
def project_to2d(pts, M: np.ndarray, device: Text) -> torch.Tensor:
    """Project 3-D points into the image plane.

    Uses the Matlab convention M = [R; t] * K with row-vector points
    (pts2d = pts3d * M); the x/y coordinates are divided by the
    homogeneous depth in place before returning.
    """
    ones = torch.ones(pts.shape[0], 1, dtype=torch.float32, device=device)
    proj = torch.matmul(torch.cat((pts, ones), 1), M.to(device=device))
    proj[:, :2] = proj[:, :2] / proj[:, 2:]
    return proj
def sample_grid_nearest(
    im: np.ndarray, projPts: np.ndarray, device: Text
) -> torch.Tensor:
    """Sample image features at projected points with nearest-neighbour lookup.

    projPts holds flattened (x, y) pixel coordinates for a c**3 voxel grid;
    the result has shape (1, channels, c, c, c).
    """
    feats = torch.as_tensor(im.copy(), device=device)
    side = int(round(projPts.shape[0] ** (1 / 3.0)))
    height, width, _channels = list(feats.shape)
    # clamp projections onto the feature map, then snap to the nearest pixel
    col = torch.clamp(projPts[:, 0], 0, width - 1).round().type(torch.long)
    row = torch.clamp(projPts[:, 1], 0, height - 1).round().type(torch.long)
    # guard against negatives (clamp above already enforces >= 0)
    col[col < 0] = 0
    row[row < 0] = 0
    sampled = feats[row, col]
    return sampled.reshape((side, side, side, -1)).permute(3, 0, 1, 2).unsqueeze(0)
def sample_grid_linear(
    im: np.ndarray, projPts: np.ndarray, device: Text
) -> torch.Tensor:
    """Unproject features via bilinear interpolation.

    Samples the 2-D feature map ``im`` (H x W x C) at the projected positions
    ``projPts`` (one (x, y) row per voxel of a c**3 grid) and returns a
    (1, C, c, c, c) tensor. Corner samples that fall outside the map are
    zeroed so the result matches numpy/TF gather semantics.
    """
    # im_x, im_y are the x and y coordinates of each projected 3D position.
    # These are concatenated here for every image in each batch,
    feats = torch.as_tensor(im.copy(), device=device)
    grid = projPts
    # cube side length: projPts holds c**3 flattened grid positions
    c = int(round(projPts.shape[0] ** (1 / 3.0)))
    fh, fw, fdim = list(feats.shape)
    # # make sure all projected indices fit onto the feature map
    im_x = torch.clamp(grid[:, 0], 0, fw - 1)
    im_y = torch.clamp(grid[:, 1], 0, fh - 1)
    # round all indices
    im_x0 = torch.floor(im_x).type(torch.long)
    # new array with rounded projected indices + 1
    im_x1 = im_x0 + 1
    im_y0 = torch.floor(im_y).type(torch.long)
    im_y1 = im_y0 + 1
    # Convert from int to float -- but these are still round
    # numbers because of rounding step above
    im_x0_f, im_x1_f = im_x0.type(torch.float), im_x1.type(torch.float)
    im_y0_f, im_y1_f = im_y0.type(torch.float), im_y1.type(torch.float)
    # Gather values
    # Samples all featuremaps at the projected indices,
    # and their +1 counterparts. Stop at Ia for nearest neighbor interpolation.
    # need to clip the corner indices because they might be out of bounds...
    # This could lead to different behavior compared to TF/numpy, which return 0
    # when an index is out of bounds
    im_x1_safe = torch.clamp(im_x1, 0, fw - 1)
    im_y1_safe = torch.clamp(im_y1, 0, fh - 1)
    # negative-index guards; clamp above already enforces >= 0, kept for safety
    im_x1[im_x1 < 0] = 0
    im_y1[im_y1 < 0] = 0
    im_x0[im_x0 < 0] = 0
    im_y0[im_y0 < 0] = 0
    im_x1_safe[im_x1_safe < 0] = 0
    im_y1_safe[im_y1_safe < 0] = 0
    # four corner samples surrounding each projected point
    Ia = feats[im_y0, im_x0]
    Ib = feats[im_y0, im_x1_safe]
    Ic = feats[im_y1_safe, im_x0]
    Id = feats[im_y1_safe, im_x1_safe]
    # To recaptiulate behavior in numpy/TF, zero out values that fall outside bounds
    # (the unclamped im_x1/im_y1 tell us which corners were out of range)
    Ib[im_x1 > fw - 1] = 0
    Ic[im_y1 > fh - 1] = 0
    Id[(im_x1 > fw - 1) | (im_y1 > fh - 1)] = 0
    # Calculate bilinear weights
    # We've now sampled the feature maps at corners around the projected values
    # Here, the corners are weighted by distance from the projected value
    wa = (im_x1_f - im_x) * (im_y1_f - im_y)
    wb = (im_x1_f - im_x) * (im_y - im_y0_f)
    wc = (im_x - im_x0_f) * (im_y1_f - im_y)
    wd = (im_x - im_x0_f) * (im_y - im_y0_f)
    Ibilin = (
        wa.unsqueeze(1) * Ia
        + wb.unsqueeze(1) * Ib
        + wc.unsqueeze(1) * Ic
        + wd.unsqueeze(1) * Id
    )
    return Ibilin.reshape((c, c, c, -1)).permute(3, 0, 1, 2).unsqueeze(0)
def sample_grid(im: np.ndarray, projPts: np.ndarray, device: Text, method: Text = "linear"):
    """Transfer 3d features to 2d by projecting down to 2d grid, using torch.

    Use 2d interpolation to transfer features to 3d points that have
    projected down onto a 2d grid.

    Note that function expects proj_grid to be flattened, so results should be
    reshaped after being returned.

    Raises:
        ValueError: if *method* is not one of "nearest", "out2d",
            "linear", "bilinear".
    """
    if method == "nearest" or method == "out2d":
        proj_rgb = sample_grid_nearest(im, projPts, device)
    elif method == "linear" or method == "bilinear":
        proj_rgb = sample_grid_linear(im, projPts, device)
    else:
        # ValueError is more precise than the original bare Exception and is
        # still caught by any existing `except Exception` handler.
        raise ValueError("{} not a valid interpolation method".format(method))
    return proj_rgb
def unDistortPoints(
    pts,
    intrinsicMatrix,
    radialDistortion,
    tangentDistortion,
    rotationMatrix,
    translationVector,
):
    """Remove lens distortion from the input points.

    Input is size (M,2), where M is the number of points.

    OpenCV expects distortion coefficients ordered (k1, k2, p1, p2[, k3]);
    the third radial coefficient is zero-padded when absent. The rotation
    matrix and translation vector are accepted for interface compatibility
    but are not used by the undistortion itself.
    """
    # (k1, k2) + (p1, p2) ...
    dcoef = radialDistortion.ravel()[:2].tolist() + tangentDistortion.ravel().tolist()
    # ... + k3 (or 0 when only two radial coefficients were calibrated)
    if len(radialDistortion.ravel()) == 3:
        dcoef = dcoef + [radialDistortion.ravel()[-1]]
    else:
        dcoef = dcoef + [0]
    # intrinsics are stored transposed (Matlab convention), hence the .T;
    # P= re-projects the normalized points back into pixel coordinates
    pts_u = cv2.undistortPoints(
        np.reshape(pts, (-1, 1, 2)).astype("float32"),
        intrinsicMatrix.T,
        np.array(dcoef),
        P=intrinsicMatrix.T,
    )
    pts_u = np.reshape(pts_u, (-1, 2))
    # BUGFIX: the original computed pts_u but fell off the end of the
    # function (returning None); also removed an unused `ts = time.time()`.
    return pts_u
import numpy
import scipy.stats
import math
def one_hot(array, N):
    """
    Convert an array of class indices to an array of one-hot vectors.

    :param array: classes to convert
    :type array: numpy.ndarray
    :param N: number of classes
    :type N: int
    :return: one-hot vectors
    :rtype: numpy.ndarray
    """
    labels = array.astype(int)
    assert numpy.max(labels) < N
    assert numpy.min(labels) >= 0
    # rows of the identity matrix are exactly the one-hot encodings
    return numpy.eye(N)[labels]
def expand_as(array, array_as):
    """
    Append singleton dimensions to *array* until it has as many dimensions
    as *array_as*, enabling broadcasting.

    :param array: input tensor
    :type array: numpy.ndarray
    :param array_as: reference tensor
    :type array_as: torch.Tensor or torch.autograd.Variable
    :return: tensor expanded with singleton dimensions as tensor_as
    :rtype: torch.Tensor or torch.autograd.Variable
    """
    missing = len(array_as.shape) - len(array.shape)
    padded_shape = list(array.shape) + [1] * missing
    return array.reshape(padded_shape)
def concatenate(array1, array2, axis=0):
    """
    Wrapper around numpy.concatenate that tolerates a missing first array:
    when array1 is None, array2 is returned unchanged.

    :param array1: input array or None
    :type array1: mixed
    :param array2: input array
    :type array2: numpy.ndarray
    :param axis: axis to concatenate
    :type axis: int
    :return: concatenated array
    :rtype: numpy.ndarray
    """
    assert isinstance(array2, numpy.ndarray)
    if array1 is None:
        return array2
    assert isinstance(array1, numpy.ndarray)
    return numpy.concatenate((array1, array2), axis=axis)
def exponential_norm(batch_size, dim, epsilon=1, ord=2):
    """
    Sample vectors with uniformly random direction and truncated-exponential
    radius within the epsilon-ball.

    :param batch_size: how many vectors to sample
    :type batch_size: int
    :param dim: dimensionality of vectors
    :type dim: int
    :param epsilon: epsilon-ball
    :type epsilon: float
    :param ord: norm to use
    :type ord: int
    :return: batch_size x dim tensor
    :rtype: numpy.ndarray
    """
    # random directions on the epsilon-sphere
    directions = numpy.random.randn(batch_size, dim)
    norms = numpy.linalg.norm(directions, ord=ord, axis=1).reshape(-1, 1)
    directions = directions / norms * epsilon
    # truncated-exponential radii in [0, 0.9], broadcast over dimensions
    radii = scipy.stats.truncexpon.rvs(1, loc=0, scale=0.9, size=(batch_size, 1))
    return directions * radii
def uniform_norm(batch_size, dim, epsilon=1, ord=2):
    """
    Sample vectors uniformly by norm and direction separately.

    :param batch_size: how many vectors to sample
    :type batch_size: int
    :param dim: dimensionality of vectors
    :type dim: int
    :param epsilon: epsilon-ball
    :type epsilon: float
    :param ord: norm to use
    :type ord: int
    :return: batch_size x dim tensor
    :rtype: numpy.ndarray
    """
    # random directions on the epsilon-sphere
    directions = numpy.random.randn(batch_size, dim)
    norms = numpy.linalg.norm(directions, ord=ord, axis=1).reshape(-1, 1)
    directions = directions / norms * epsilon
    # radii uniform in [0, 1], broadcast over dimensions
    radii = numpy.random.uniform(0, 1, (batch_size, 1))
    return directions * radii
def uniform_ball(batch_size, dim, epsilon=1, ord=2):
    """
    Sample vectors uniformly in the n-ball.

    See Harman et al., On decompositional algorithms for uniform sampling
    from n-spheres and n-balls.

    :param batch_size: how many vectors to sample
    :type batch_size: int
    :param dim: dimensionality of vectors
    :type dim: int
    :param epsilon: epsilon-ball
    :type epsilon: float
    :param ord: norm to use
    :type ord: int
    :return: batch_size x dim tensor
    :rtype: numpy.ndarray
    """
    # random directions on the epsilon-sphere
    directions = numpy.random.randn(batch_size, dim)
    norms = numpy.linalg.norm(directions, ord=ord, axis=1).reshape(-1, 1)
    directions = directions / norms * epsilon
    # the 1/dim exponent makes radii volume-uniform in the ball
    radii = numpy.random.uniform(0, 1, (batch_size, 1)) ** (1. / dim)
    return directions * radii
return random
def uniform_sphere(batch_size, dim, epsilon=1, ord=2):
    """
    Sample vectors uniformly on the n-sphere of radius epsilon.

    See Harman et al., On decompositional algorithms for uniform sampling
    from n-spheres and n-balls.

    :param batch_size: how many vectors to sample
    :type batch_size: int
    :param dim: dimensionality of vectors
    :type dim: int
    :param epsilon: epsilon-ball
    :type epsilon: float
    :param ord: norm to use
    :type ord: int
    :return: batch_size x dim tensor
    :rtype: numpy.ndarray
    """
    directions = numpy.random.randn(batch_size, dim)
    norms = numpy.linalg.norm(directions, ord=ord, axis=1).reshape(-1, 1)
    return directions / norms * epsilon
def truncated_normal(size, lower=-2, upper=2):
    """
    Sample from a standard normal truncated to [lower, upper].

    See https://stackoverflow.com/questions/18441779/how-to-specify-upper-and-lower-limits-when-using-numpy-random-normal.

    :param size: size of vector
    :type size: [int]
    :param lower: lower bound
    :type lower: float
    :param upper: upper bound
    :type upper: float
    :return: batch_size x dim tensor
    :rtype: numpy.ndarray
    """
    distribution = scipy.stats.truncnorm
    return distribution.rvs(lower, upper, size=size)
def project_simplex(v, s=1):
    r"""
    Compute the Euclidean projection of ``v`` onto the positive simplex.

    Adapted from https://gist.github.com/daien/1272551/edd95a6154106f8e28209a1c7964623ef8397246.
    Solves the optimisation problem (using the algorithm from [1]):
        min_w 0.5 * || w - v ||_2^2 , s.t. \sum_i w_i = s, w_i >= 0

    Fixes vs. the earlier version here: the Lagrange multiplier must be
    divided by the number of positive components ``rho + 1`` (``rho`` is a
    0-based index) — dividing by ``rho`` was off by one and divided by zero
    whenever ``rho == 0``; ``numpy.alltrue`` (removed in NumPy 2.0) is
    replaced by ``numpy.all``.

    Parameters
    ----------
    v: (n,) numpy array,
        n-dimensional vector to project
    s: int, optional, default: 1,
        radius of the simplex

    Returns
    -------
    w: (n,) numpy array,
        Euclidean projection of v on the simplex

    Notes
    -----
    The complexity of this algorithm is in O(n log(n)) as it involves sorting v.
    Better alternatives exist for high-dimensional sparse vectors (cf. [1]),
    however this implementation still easily scales to millions of dimensions.

    References
    ----------
    [1] Efficient Projections onto the l1-Ball for Learning in High Dimensions
        Duchi et al., International Conference on Machine Learning (ICML 2008)
        http://www.cs.berkeley.edu/~jduchi/projects/DuchiSiShCh08.pdf
    """
    assert s > 0, "Radius s must be strictly positive (%d <= 0)" % s
    n, = v.shape  # will raise ValueError if v is not 1-D
    # check if we are already on the simplex
    if v.sum() == s and numpy.all(v >= 0):
        # best projection: itself!
        return v
    # get the array of cumulative sums of a sorted (decreasing) copy of v
    u = numpy.sort(v)[::-1]
    cssv = numpy.cumsum(u)
    # get the index of the last > 0 component of the optimal solution
    rho = numpy.nonzero(u * numpy.arange(1, n + 1) > (cssv - s))[0][-1]
    # compute the Lagrange multiplier associated to the simplex constraint;
    # rho is 0-based, so rho + 1 entries of the solution are positive
    theta = float(cssv[rho] - s) / (rho + 1)
    # compute the projection by thresholding v using theta
    w = (v - theta).clip(min=0)
    return w
def projection_simplex_sort(v, z=1):
    """
    Project ``v`` onto the simplex {w : w >= 0, sum(w) = z} by sorting.

    :param v: vector to project
    :type v: numpy.ndarray
    :param z: radius of the simplex
    :type z: float
    :return: Euclidean projection of v
    :rtype: numpy.ndarray
    """
    dims = v.shape[0]
    descending = numpy.sort(v)[::-1]
    shifted_cumsum = numpy.cumsum(descending) - z
    positions = numpy.arange(1, dims + 1)
    # support of the solution: entries that stay positive after thresholding
    support = descending - shifted_cumsum / positions > 0
    k = positions[support][-1]
    tau = shifted_cumsum[support][-1] / float(k)
    return numpy.maximum(v - tau, 0)
def projection_simplex_pivot(v, z=1, random_state=None):
    """
    Project ``v`` onto the simplex {w : w >= 0, sum(w) = z} with a randomized
    pivot partitioning scheme (expected linear time).

    :param v: vector to project
    :type v: numpy.ndarray
    :param z: radius of the simplex
    :type z: float
    :param random_state: seed for the pivot choices
    :type random_state: int or None
    :return: Euclidean projection of v
    :rtype: numpy.ndarray
    """
    rng = numpy.random.RandomState(random_state)
    dims = len(v)
    candidates = numpy.arange(dims)
    accumulated_sum = 0
    accumulated_count = 0
    while len(candidates) > 0:
        # pick a random pivot among the remaining indices
        pivot = candidates[rng.randint(0, len(candidates))]
        at_least_pivot = [j for j in candidates if v[j] >= v[pivot] and j != pivot]
        below_pivot = [j for j in candidates if v[j] < v[pivot]]
        # sum over the pivot and everything at least as large (same
        # accumulation order as iterating the candidate list)
        partial_sum = v[pivot]
        for j in at_least_pivot:
            partial_sum += v[j]
        partial_count = len(at_least_pivot) + 1
        if accumulated_sum + partial_sum - (accumulated_count + partial_count) * v[pivot] < z:
            accumulated_sum += partial_sum
            accumulated_count += partial_count
            candidates = below_pivot
        else:
            candidates = at_least_pivot
    theta = (accumulated_sum - z) / float(accumulated_count)
    return numpy.maximum(v - theta, 0)
def projection_simplex_bisection(v, z=1, tau=0.0001, max_iter=1000):
    """
    Project ``v`` onto the simplex {w : w >= 0, sum(w) = z} by bisecting on
    the threshold theta.

    Fixes vs. the earlier version: ``xrange`` (a NameError on Python 3) is
    replaced by ``range``, and ``w`` is initialized before the loop so that
    ``max_iter <= 0`` cannot raise ``NameError``.

    :param v: vector to project
    :type v: numpy.ndarray
    :param z: radius of the simplex
    :type z: float
    :param tau: relative tolerance on ``sum(w) - z``
    :type tau: float
    :param max_iter: maximum number of bisection steps
    :type max_iter: int
    :return: approximate Euclidean projection of v
    :rtype: numpy.ndarray
    """
    lower = 0
    upper = numpy.max(v)
    current = numpy.inf
    w = numpy.maximum(v, 0)  # theta = 0 solution; returned if max_iter <= 0
    for it in range(max_iter):
        # stop once sum(w) is within relative tolerance tau just below z
        if numpy.abs(current) / z < tau and current < 0:
            break
        theta = (upper + lower) / 2.0
        w = numpy.maximum(v - theta, 0)
        current = numpy.sum(w) - z
        if current <= 0:
            upper = theta
        else:
            lower = theta
    return w
def project_ball(array, epsilon=1, ord=2):
    """
    Project each sample of ``array`` (flattened to a vector) onto the L_ord
    epsilon-ball.

    **Assumes the first dimension to be batch dimension, which is preserved.**
    NOTE(review): the L_0 and L_1 branches write through reshaped views, so
    the input may be modified in place — pass a copy if the input must stay
    untouched.

    Fix vs. the earlier version: the L_0 branch now restores the original
    shape before returning, like the L_1 and L_2 branches already did.

    :param array: array
    :type array: numpy.ndarray
    :param epsilon: radius of ball.
    :type epsilon: float
    :param ord: order of norm (0, 1, 2 or float('inf'))
    :type ord: int
    :return: projected vector
    :rtype: numpy.ndarray
    """
    assert isinstance(array, numpy.ndarray), 'given tensor should be numpy.ndarray'

    if ord == 0:
        assert epsilon >= 1
        size = array.shape
        flattened_size = numpy.prod(numpy.array(size[1:]))
        array = array.reshape(-1, flattened_size)
        # keep the ceil(epsilon) largest entries per sample, zero the rest
        # NOTE(review): the threshold is taken from the signed values, not
        # their magnitudes — confirm this is the intended L_0 projection
        sorted_values = numpy.sort(array, axis=1)
        k = int(math.ceil(epsilon))
        thresholds = sorted_values[:, -k]
        mask = (array >= expand_as(thresholds, array)).astype(float)
        array *= mask
        # bug fix: restore the original shape (was missing for L_0)
        if len(size) == 4:
            array = array.reshape(-1, size[1], size[2], size[3])
        elif len(size) == 2:
            array = array.reshape(-1, size[1])
    elif ord == 1:
        size = array.shape
        flattened_size = numpy.prod(numpy.array(size[1:]))
        array = array.reshape(-1, flattened_size)
        for i in range(array.shape[0]):
            # compute the vector of absolute values
            u = numpy.abs(array[i])
            # check if v is already a solution
            if u.sum() <= epsilon:
                # L1-norm is <= epsilon, nothing to do
                continue
            # v is not already a solution: optimum lies on the boundary
            # (norm == epsilon); project *u* on the simplex
            # w = project_simplex(u, s=epsilon)
            w = projection_simplex_sort(u, z=epsilon)
            # compute the solution to the original problem on v
            w *= numpy.sign(array[i])
            array[i] = w
        if len(size) == 4:
            array = array.reshape(-1, size[1], size[2], size[3])
        elif len(size) == 2:
            array = array.reshape(-1, size[1])
    elif ord == 2:
        size = array.shape
        flattened_size = numpy.prod(numpy.array(size[1:]))
        array = array.reshape(-1, flattened_size)
        # scale each sample down to norm epsilon; samples already inside the
        # ball keep scale 1
        clamped = numpy.clip(epsilon/numpy.linalg.norm(array, 2, axis=1), a_min=None, a_max=1)
        clamped = clamped.reshape(-1, 1)
        array = array * clamped
        if len(size) == 4:
            array = array.reshape(-1, size[1], size[2], size[3])
        elif len(size) == 2:
            array = array.reshape(-1, size[1])
    elif ord == float('inf'):
        # element-wise clipping is the exact L_inf projection
        array = numpy.clip(array, a_min=-epsilon, a_max=epsilon)
    else:
        raise NotImplementedError()
    return array
def project_sphere(array, epsilon=1, ord=2):
    """
    Rescale each sample of ``array`` (flattened to a vector) onto the L_ord
    epsilon-sphere, i.e. normalize every sample to norm exactly epsilon.

    **Assumes the first dimension to be batch dimension, which is preserved.**

    :param array: batch of samples
    :type array: numpy.ndarray
    :param epsilon: radius of the sphere
    :type epsilon: float
    :param ord: order of norm
    :type ord: int
    :return: projected batch, same shape as the input
    :rtype: numpy.ndarray
    """
    assert isinstance(array, numpy.ndarray), 'given tensor should be numpy.ndarray'
    original_shape = array.shape
    per_sample = numpy.prod(numpy.array(original_shape[1:]))
    flattened = array.reshape(-1, per_sample)
    # normalize each row, then scale onto the epsilon-sphere
    norms = numpy.linalg.norm(flattened, axis=1, ord=ord).reshape(-1, 1)
    flattened = flattened / norms
    flattened *= epsilon
    if len(original_shape) == 4:
        return flattened.reshape(-1, original_shape[1], original_shape[2], original_shape[3])
    elif len(original_shape) == 2:
        return flattened.reshape(-1, original_shape[1])
    return flattened
def project_orthogonal(basis, vectors, rank=None):
    """
    Orthogonally project ``vectors`` onto the column space of ``basis``.

    :param basis: basis vectors (as columns) to project on
    :type basis: numpy.ndarray
    :param vectors: vectors to project
    :type vectors: numpy.ndarray
    :param rank: if given and positive, restrict to the first ``rank``
        orthonormal directions
    :type rank: int or None
    :return: projection
    :rtype: numpy.ndarray
    """
    # The columns of the QR factor Q form an orthonormal basis of span(basis)
    orthonormal, _ = numpy.linalg.qr(basis)
    if rank is not None and rank > 0:
        orthonormal = orthonormal[:, :rank]
    # For orthonormal Q the projection is Q (Q^T vectors)
    coefficients = orthonormal.T.dot(vectors)
    return orthonormal.dot(coefficients)
def project_lstsq(basis, vectors):
    """
    Project ``vectors`` onto the column space of ``basis`` via least squares.

    :param basis: basis vectors to project on
    :type basis: numpy.ndarray
    :param vectors: vectors to project
    :type vectors: numpy.ndarray
    :return: projection
    :rtype: numpy.ndarray
    """
    # rcond=None selects the modern machine-precision cutoff and silences
    # numpy's FutureWarning about the changed default
    x, _, _, _ = numpy.linalg.lstsq(basis, vectors, rcond=None)
    projection = basis.dot(x)
    return projection
def angles(vectors_a, vectors_b):
"""
Compute angle between two sets of vectors.
See https://people.eecs.berkeley.edu/~wkahan/Mindless.pdf.
:param vectors_a:
:param vectors_b:
:return:
"""
if len(vectors_b.shape) == 1:
vectors_b = vectors_b.reshape(-1, 1)
# Normalize vector
norms_a = numpy.linalg.norm(vectors_a, ord=2, axis=0)
norms_b = numpy.linalg.norm(vectors_b, ord=2, axis=0)
norms_a = numpy.repeat(norms_a.reshape(1, -1), vectors_a.shape[0], axis=0)
norms_b = numpy.repeat(norms_b.reshape(1, -1), vectors_b.shape[0], axis=0)
vectors_a /= norms_a
vectors_b /= norms_b
term_1 = | numpy.multiply(vectors_a, norms_b) | numpy.multiply |
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import shapely
import shapely.geometry
import imgaug as ia
from imgaug.testutils import reseed
def main():
    """Run all imgaug core tests sequentially and print the total runtime.

    Commented-out calls are tests that were intentionally disabled upstream
    (missing, slow, or known-broken); they are kept for reference.
    """
    time_start = time.time()
    test_is_np_array()
    test_is_single_integer()
    test_is_single_float()
    test_is_single_number()
    test_is_iterable()
    test_is_string()
    test_is_single_bool()
    test_is_integer_array()
    test_is_float_array()
    test_is_callable()
    test_caller_name()
    test_seed()
    test_current_random_state()
    test_new_random_state()
    test_dummy_random_state()
    test_copy_random_state()
    test_derive_random_state()
    test_derive_random_states()
    test_forward_random_state()
    # test_quokka()
    # test_quokka_square()
    # test_angle_between_vectors()
    # test_draw_text()
    test_imresize_many_images()
    test_imresize_single_image()
    test_pad()
    test_compute_paddings_for_aspect_ratio()
    test_pad_to_aspect_ratio()
    test_pool()
    test_avg_pool()
    test_max_pool()
    test_draw_grid()
    # test_show_grid()
    # test_do_assert()
    # test_HooksImages_is_activated()
    # test_HooksImages_is_propagating()
    # test_HooksImages_preprocess()
    # test_HooksImages_postprocess()
    test_Keypoint()
    test_KeypointsOnImage()
    test_BoundingBox()
    test_BoundingBoxesOnImage()
    # test_HeatmapsOnImage_get_arr()
    # test_HeatmapsOnImage_find_global_maxima()
    test_HeatmapsOnImage_draw()
    test_HeatmapsOnImage_draw_on_image()
    test_HeatmapsOnImage_invert()
    test_HeatmapsOnImage_pad()
    # test_HeatmapsOnImage_pad_to_aspect_ratio()
    test_HeatmapsOnImage_avg_pool()
    test_HeatmapsOnImage_max_pool()
    test_HeatmapsOnImage_scale()
    # test_HeatmapsOnImage_to_uint8()
    # test_HeatmapsOnImage_from_uint8()
    # test_HeatmapsOnImage_from_0to1()
    # test_HeatmapsOnImage_change_normalization()
    # test_HeatmapsOnImage_copy()
    # test_HeatmapsOnImage_deepcopy()
    test_SegmentationMapOnImage_bool()
    test_SegmentationMapOnImage_get_arr_int()
    # test_SegmentationMapOnImage_get_arr_bool()
    test_SegmentationMapOnImage_draw()
    test_SegmentationMapOnImage_draw_on_image()
    test_SegmentationMapOnImage_pad()
    test_SegmentationMapOnImage_pad_to_aspect_ratio()
    test_SegmentationMapOnImage_scale()
    test_SegmentationMapOnImage_to_heatmaps()
    test_SegmentationMapOnImage_from_heatmaps()
    test_SegmentationMapOnImage_copy()
    test_SegmentationMapOnImage_deepcopy()
    test_Polygon___init__()
    test_Polygon_xx()
    test_Polygon_yy()
    test_Polygon_xx_int()
    test_Polygon_yy_int()
    test_Polygon_is_valid()
    test_Polygon_area()
    test_Polygon_project()
    test_Polygon__compute_inside_image_point_mask()
    test_Polygon_is_fully_within_image()
    test_Polygon_is_partly_within_image()
    test_Polygon_is_out_of_image()
    test_Polygon_cut_out_of_image()
    test_Polygon_clip_out_of_image()
    test_Polygon_shift()
    test_Polygon_draw_on_image()
    test_Polygon_extract_from_image()
    test_Polygon_to_shapely_polygon()
    test_Polygon_to_bounding_box()
    test_Polygon_from_shapely()
    test_Polygon_copy()
    test_Polygon_deepcopy()
    test_Polygon___repr__()
    test_Polygon___str__()
    # test_Batch()
    test_BatchLoader()
    # test_BackgroundAugmenter.get_batch()
    # test_BackgroundAugmenter._augment_images_worker()
    # test_BackgroundAugmenter.terminate()
    time_end = time.time()
    print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_is_np_array():
    """ia.is_np_array must accept ndarrays of any dtype/shape and reject everything else."""
    class _NotAnArray(object):
        pass

    arrays = (
        np.zeros((1, 2), dtype=np.uint8),
        np.zeros((64, 64, 3), dtype=np.uint8),
        np.zeros((1, 2), dtype=np.float32),
        np.zeros((100,), dtype=np.float64),
    )
    non_arrays = (
        "A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _NotAnArray(),
        -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4,
    )
    for candidate in arrays:
        assert ia.is_np_array(candidate) is True
    for candidate in non_arrays:
        assert ia.is_np_array(candidate) is False
def test_is_single_integer():
    """ia.is_single_integer: True only for Python ints and numpy integer scalars."""
    expectations = [
        ("A", False),
        (None, False),
        (1.2, False),
        (1.0, False),
        (np.ones((1,), dtype=np.float32)[0], False),
        (1, True),
        (1234, True),
        (np.ones((1,), dtype=np.uint8)[0], True),
        (np.ones((1,), dtype=np.int32)[0], True),
    ]
    for value, expected in expectations:
        assert ia.is_single_integer(value) is expected
def test_is_single_float():
    """ia.is_single_float: True only for Python floats and numpy float scalars."""
    expectations = [
        ("A", False),
        (None, False),
        (1.2, True),
        (1.0, True),
        (np.ones((1,), dtype=np.float32)[0], True),
        (1, False),
        (1234, False),
        (np.ones((1,), dtype=np.uint8)[0], False),
        (np.ones((1,), dtype=np.int32)[0], False),
    ]
    for value, expected in expectations:
        assert ia.is_single_float(value) is expected
def test_caller_name():
    """ia.caller_name() must report the name of the function that invoked it."""
    observed = ia.caller_name()
    assert observed == 'test_caller_name'
def test_is_single_number():
    """ia.is_single_number: ints and floats count; bools, strings, containers and arrays do not."""
    class _Opaque(object):
        pass

    numbers = (-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4)
    non_numbers = ("A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Opaque(),
                   np.zeros((1, 2), dtype=np.uint8))
    for candidate in numbers:
        assert ia.is_single_number(candidate) is True
    for candidate in non_numbers:
        assert ia.is_single_number(candidate) is False
def test_is_iterable():
    """ia.is_iterable: sequences, strings and arrays qualify; scalars, bools and plain objects do not."""
    class _Plain(object):
        pass

    iterables = (
        [0, 1, 2],
        ["A", "X"],
        [[123], [456, 789]],
        [],
        (1, 2, 3),
        (1,),
        tuple(),
        "A",
        "ABC",
        "",
        np.zeros((100,), dtype=np.uint8),
    )
    non_iterables = (1, 100, 0, -100, -1, 1.2, -1.2, True, False, _Plain())
    for candidate in iterables:
        assert ia.is_iterable(candidate) is True, candidate
    for candidate in non_iterables:
        assert ia.is_iterable(candidate) is False
def test_is_string():
    """ia.is_string: str instances only (including the empty string)."""
    class _Plain(object):
        pass

    strings = ("A", "BC", "1", "")
    non_strings = (-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
                   (1.0, 2.0), [1.0, 2.0], _Plain(), np.zeros((1, 2), dtype=np.uint8))
    for candidate in strings:
        assert ia.is_string(candidate) is True
    for candidate in non_strings:
        assert ia.is_string(candidate) is False
def test_is_single_bool():
    """ia.is_single_bool: the two bool singletons only; 0/1, arrays and bool arrays do not count."""
    class _Plain(object):
        pass

    bools = (False, True)
    non_bools = (-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, (1.0, 2.0), [1.0, 2.0],
                 _Plain(), np.zeros((1, 2), dtype=np.uint8), np.zeros((1,), dtype=bool))
    for candidate in bools:
        assert ia.is_single_bool(candidate) is True
    for candidate in non_bools:
        assert ia.is_single_bool(candidate) is False
def test_is_integer_array():
    """ia.is_integer_array: True only for ndarrays with an integer dtype
    (float/bool arrays and non-array values must be rejected)."""
    class _Dummy(object):
        pass
    values_true = [
        np.zeros((1, 2), dtype=np.uint8),
        np.zeros((100,), dtype=np.uint8),
        np.zeros((1, 2), dtype=np.uint16),
        np.zeros((1, 2), dtype=np.int32),
        np.zeros((1, 2), dtype=np.int64)
    ]
    values_false = [
        "A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
        (1.0, 2.0), [1.0, 2.0], _Dummy(),
        np.zeros((1, 2), dtype=np.float16),
        np.zeros((100,), dtype=np.float32),
        np.zeros((1, 2), dtype=np.float64),
        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # canonical dtype alias
        np.zeros((1, 2), dtype=bool)
    ]
    for value in values_true:
        assert ia.is_integer_array(value) is True
    for value in values_false:
        assert ia.is_integer_array(value) is False
def test_is_float_array():
    """ia.is_float_array: True only for ndarrays with a float dtype
    (integer/bool arrays and non-array values must be rejected)."""
    class _Dummy(object):
        pass
    values_true = [
        np.zeros((1, 2), dtype=np.float16),
        np.zeros((100,), dtype=np.float32),
        np.zeros((1, 2), dtype=np.float64)
    ]
    values_false = [
        "A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
        (1.0, 2.0), [1.0, 2.0], _Dummy(),
        np.zeros((1, 2), dtype=np.uint8),
        np.zeros((100,), dtype=np.uint8),
        np.zeros((1, 2), dtype=np.uint16),
        np.zeros((1, 2), dtype=np.int32),
        np.zeros((1, 2), dtype=np.int64),
        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # canonical dtype alias
        np.zeros((1, 2), dtype=bool)
    ]
    for value in values_true:
        assert ia.is_float_array(value) is True
    for value in values_false:
        assert ia.is_float_array(value) is False
def test_is_callable():
    """ia.is_callable: functions, lambdas and objects defining __call__
    qualify; everything else does not."""
    def _dummy_func():
        pass
    _dummy_func2 = lambda x: x
    class _Dummy1(object):
        pass
    class _Dummy2(object):
        def __call__(self):
            pass
    values_true = [_dummy_func, _dummy_func2, _Dummy2()]
    values_false = ["A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
                    (1.0, 2.0), [1.0, 2.0], _Dummy1(), np.zeros((1, 2), dtype=np.uint8)]
    for value in values_true:
        # `is True`/`is False` (not `== True`) matches the sibling tests and
        # additionally verifies an actual bool is returned (E712)
        assert ia.is_callable(value) is True
    for value in values_false:
        assert ia.is_callable(value) is False
def test_seed():
    """ia.seed must reseed the global RNG exactly like np.random.RandomState(seed)."""
    ia.seed(10017)
    reference = np.random.RandomState(10017)
    assert ia.CURRENT_RANDOM_STATE.randint(0, 1000 * 1000) == reference.randint(0, 1000 * 1000)
    reseed()
def test_current_random_state():
    """ia.current_random_state() must hand back the module-global RNG."""
    observed = ia.current_random_state()
    assert observed == ia.CURRENT_RANDOM_STATE
def test_new_random_state():
    """ia.new_random_state: seed=None derives deterministically from the global
    RNG (or from entropy when fully_random=True); explicit seeds reproduce."""
    seed = 1000
    # seed=None, fully_random=False -> derived from the seeded global state
    ia.seed(seed)
    derived = ia.new_random_state(seed=None, fully_random=False)
    reference = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
    assert derived.randint(0, 10**6) == reference.randint(0, 10**6)
    # consecutive derivations must differ from each other
    first = ia.new_random_state(seed=None, fully_random=False)
    second = ia.new_random_state(seed=None, fully_random=False)
    assert first.randint(0, 10**6) != second.randint(0, 10**6)
    # fully_random=True must ignore the seeded global state
    ia.seed(seed)
    np.random.seed(seed)
    entropy_state = ia.new_random_state(seed=None, fully_random=True)
    seeded_reference = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
    assert entropy_state.randint(0, 10**6) != seeded_reference.randint(0, 10**6)
    first = ia.new_random_state(seed=None, fully_random=True)
    second = ia.new_random_state(seed=None, fully_random=True)
    assert first.randint(0, 10**6) != second.randint(0, 10**6)
    # explicit seeds are reproducible
    first = ia.new_random_state(seed=1234)
    second = ia.new_random_state(seed=1234)
    reference = np.random.RandomState(1234)
    assert first.randint(0, 10**6) == second.randint(0, 10**6) == reference.randint(0, 10**6)
def test_dummy_random_state():
    """ia.dummy_random_state() must behave like a RandomState seeded with 1."""
    expected = np.random.RandomState(1)
    assert ia.dummy_random_state().randint(0, 10**6) == expected.randint(0, 10**6)
def test_copy_random_state():
    """ia.copy_random_state: copies are distinct objects generating identical sequences."""
    source = np.random.RandomState(1017)
    duplicate = ia.copy_random_state(source)
    assert source != duplicate
    assert source.randint(0, 10**6) == duplicate.randint(0, 10**6)
    # the global numpy RNG is returned as-is unless force_copy is set
    assert ia.copy_random_state(np.random) == np.random
    assert ia.copy_random_state(np.random, force_copy=True) != np.random
def test_derive_random_state():
    """ia.derive_random_state must seed the child RNG with randint(0, 10**6)
    drawn from the parent. (The unused local `rs` of the old version was
    removed.)"""
    observed = ia.derive_random_state(np.random.RandomState(1017))
    expected = np.random.RandomState(np.random.RandomState(1017).randint(0, 10**6))
    assert observed.randint(0, 10**6) == expected.randint(0, 10**6)
def test_derive_random_states():
    """ia.derive_random_states(n=2) must yield children seeded base_seed+0 and base_seed+1."""
    child_a, child_b = ia.derive_random_states(np.random.RandomState(1017), n=2)
    base_seed = np.random.RandomState(1017).randint(0, 10**6)
    for offset, child in enumerate([child_a, child_b]):
        reference = np.random.RandomState(base_seed + offset)
        assert child.randint(0, 10**6) == reference.randint(0, 10**6)
def test_forward_random_state():
    """ia.forward_random_state must advance the RNG by exactly one uniform() draw."""
    forwarded = np.random.RandomState(1017)
    manually_advanced = np.random.RandomState(1017)
    ia.forward_random_state(forwarded)
    manually_advanced.uniform()
    assert forwarded.randint(0, 10**6) == manually_advanced.randint(0, 10**6)
def test_imresize_many_images():
    """Test ia.imresize_many_images: identity/down/up-scaling across
    interpolation modes, size given as int/float/tuple/mixed, channel-less
    images, empty inputs, zero-sized images and invalid size values."""
    interpolations = [None,
                      "nearest", "linear", "area", "cubic",
                      cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
    for c in [1, 3]:
        # base images: all-white, all-black, and a white square centered on black,
        # each at 16x16 plus matching 8x8 and 32x32 reference versions
        image1 = np.zeros((16, 16, c), dtype=np.uint8) + 255
        image2 = np.zeros((16, 16, c), dtype=np.uint8)
        image3 = np.pad(
            np.zeros((8, 8, c), dtype=np.uint8) + 255,
            ((4, 4), (4, 4), (0, 0)),
            mode="constant",
            constant_values=0
        )
        image1_small = np.zeros((8, 8, c), dtype=np.uint8) + 255
        image2_small = np.zeros((8, 8, c), dtype=np.uint8)
        image3_small = np.pad(
            np.zeros((4, 4, c), dtype=np.uint8) + 255,
            ((2, 2), (2, 2), (0, 0)),
            mode="constant",
            constant_values=0
        )
        image1_large = np.zeros((32, 32, c), dtype=np.uint8) + 255
        image2_large = np.zeros((32, 32, c), dtype=np.uint8)
        image3_large = np.pad(
            np.zeros((16, 16, c), dtype=np.uint8) + 255,
            ((8, 8), (8, 8), (0, 0)),
            mode="constant",
            constant_values=0
        )
        images = np.uint8([image1, image2, image3])
        images_small = np.uint8([image1_small, image2_small, image3_small])
        images_large = np.uint8([image1_large, image2_large, image3_large])
        for images_this_iter in [images, list(images)]:  # test for ndarray and list(ndarray) input
            for interpolation in interpolations:
                # identity resize must be lossless for every interpolation
                images_same_observed = ia.imresize_many_images(images_this_iter, (16, 16), interpolation=interpolation)
                for image_expected, image_observed in zip(images_this_iter, images_same_observed):
                    diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                    assert np.sum(diff) == 0
            for interpolation in interpolations:
                # downscaling is interpolation-dependent, so only bound the error
                images_small_observed = ia.imresize_many_images(images_this_iter, (8, 8), interpolation=interpolation)
                for image_expected, image_observed in zip(images_small, images_small_observed):
                    diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                    diff_fraction = np.sum(diff) / (image_observed.size * 255)
                    assert diff_fraction < 0.5
            for interpolation in interpolations:
                # upscaling, same loose bound
                images_large_observed = ia.imresize_many_images(images_this_iter, (32, 32), interpolation=interpolation)
                for image_expected, image_observed in zip(images_large, images_large_observed):
                    diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                    diff_fraction = np.sum(diff) / (image_observed.size * 255)
                    assert diff_fraction < 0.5
    # test size given as single int
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, 8)
    assert observed.shape == (1, 8, 8, 3)
    # test size given as single float
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, 2.0)
    assert observed.shape == (1, 8, 8, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, 0.5)
    assert observed.shape == (1, 2, 2, 3)
    # test size given as (float, float)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (2.0, 2.0))
    assert observed.shape == (1, 8, 8, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (0.5, 0.5))
    assert observed.shape == (1, 2, 2, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (2.0, 0.5))
    assert observed.shape == (1, 8, 2, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (0.5, 2.0))
    assert observed.shape == (1, 2, 8, 3)
    # test size given as int+float or float+int
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (11, 2.0))
    assert observed.shape == (1, 11, 8, 3)
    images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
    observed = ia.imresize_many_images(images, (2.0, 11))
    assert observed.shape == (1, 8, 11, 3)
    # test no channels
    images = np.zeros((1, 4, 4), dtype=np.uint8)
    images_rs = ia.imresize_many_images(images, (2, 2))
    assert images_rs.shape == (1, 2, 2)
    images = [np.zeros((4, 4), dtype=np.uint8)]
    images_rs = ia.imresize_many_images(images, (2, 2))
    assert isinstance(images_rs, list)
    assert images_rs[0].shape == (2, 2)
    # test len 0 input
    observed = ia.imresize_many_images(np.zeros((0, 8, 8, 3), dtype=np.uint8), (4, 4))
    assert ia.is_np_array(observed)
    assert observed.dtype.type == np.uint8
    assert len(observed) == 0
    observed = ia.imresize_many_images([], (4, 4))
    assert isinstance(observed, list)
    assert len(observed) == 0
    # test images with zero height/width
    images = [np.zeros((0, 4, 3), dtype=np.uint8)]
    got_exception = False
    try:
        _ = ia.imresize_many_images(images, sizes=(2, 2))
    except Exception as exc:
        assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
        got_exception = True
    assert got_exception
    images = [np.zeros((4, 0, 3), dtype=np.uint8)]
    got_exception = False
    try:
        _ = ia.imresize_many_images(images, sizes=(2, 2))
    except Exception as exc:
        assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
        got_exception = True
    assert got_exception
    images = [np.zeros((0, 0, 3), dtype=np.uint8)]
    got_exception = False
    try:
        _ = ia.imresize_many_images(images, sizes=(2, 2))
    except Exception as exc:
        assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
        got_exception = True
    assert got_exception
    # test invalid sizes
    # build all sign/int/float combinations of the two base invalid sizes
    sizes_all = [(-1, 2), (0, 2)]
    sizes_all = sizes_all\
        + [(float(a), b) for a, b in sizes_all]\
        + [(a, float(b)) for a, b in sizes_all]\
        + [(float(a), float(b)) for a, b in sizes_all]\
        + [(-a, -b) for a, b in sizes_all]\
        + [(-float(a), -b) for a, b in sizes_all]\
        + [(-a, -float(b)) for a, b in sizes_all]\
        + [(-float(a), -float(b)) for a, b in sizes_all]
    sizes_all = sizes_all\
        + [(b, a) for a, b in sizes_all]
    sizes_all = sizes_all\
        + [-1.0, 0.0, -1, 0]
    for sizes in sizes_all:
        images = [np.zeros((4, 4, 3), dtype=np.uint8)]
        got_exception = False
        try:
            _ = ia.imresize_many_images(images, sizes=sizes)
        except Exception as exc:
            assert "value is zero or lower than zero." in str(exc)
            got_exception = True
        assert got_exception
    # test list input but all with same shape
    images = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(2)]
    observed = ia.imresize_many_images(images, (4, 4))
    assert isinstance(observed, list)
    assert all([image.shape == (4, 4, 3) for image in observed])
    assert all([image.dtype.type == np.uint8 for image in observed])
def test_imresize_single_image():
    """Test ia.imresize_single_image for identity/down/up-scaling across
    interpolation modes; c == -1 denotes a channel-less (H, W) image."""
    for c in [-1, 1, 3]:
        # base images: all-white, all-black, white square centered on black,
        # plus matching 8x8 and 32x32 reference versions
        image1 = np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255
        image2 = np.zeros((16, 16, abs(c)), dtype=np.uint8)
        image3 = np.pad(
            np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255,
            ((4, 4), (4, 4), (0, 0)),
            mode="constant",
            constant_values=0
        )
        image1_small = np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255
        image2_small = np.zeros((8, 8, abs(c)), dtype=np.uint8)
        image3_small = np.pad(
            np.zeros((4, 4, abs(c)), dtype=np.uint8) + 255,
            ((2, 2), (2, 2), (0, 0)),
            mode="constant",
            constant_values=0
        )
        image1_large = np.zeros((32, 32, abs(c)), dtype=np.uint8) + 255
        image2_large = np.zeros((32, 32, abs(c)), dtype=np.uint8)
        image3_large = np.pad(
            np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255,
            ((8, 8), (8, 8), (0, 0)),
            mode="constant",
            constant_values=0
        )
        images = np.uint8([image1, image2, image3])
        images_small = np.uint8([image1_small, image2_small, image3_small])
        images_large = np.uint8([image1_large, image2_large, image3_large])
        if c == -1:
            # drop the channel axis to test (H, W) input
            images = images[:, :, 0]
            images_small = images_small[:, :, 0]
            images_large = images_large[:, :, 0]
        interpolations = [None,
                          "nearest", "linear", "area", "cubic",
                          cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
        for interpolation in interpolations:
            for image in images:
                # identity resize must be lossless
                image_observed = ia.imresize_single_image(image, (16, 16), interpolation=interpolation)
                diff = np.abs(image.astype(np.int32) - image_observed.astype(np.int32))
                assert np.sum(diff) == 0
        for interpolation in interpolations:
            for image, image_expected in zip(images, images_small):
                # downscaling is interpolation-dependent; only bound the error
                image_observed = ia.imresize_single_image(image, (8, 8), interpolation=interpolation)
                diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                diff_fraction = np.sum(diff) / (image_observed.size * 255)
                assert diff_fraction < 0.5
        for interpolation in interpolations:
            for image, image_expected in zip(images, images_large):
                # upscaling, same loose bound
                image_observed = ia.imresize_single_image(image, (32, 32), interpolation=interpolation)
                diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
                diff_fraction = np.sum(diff) / (image_observed.size * 255)
                assert diff_fraction < 0.5
def test_pad():
    """Test ia.pad for integer and float dtypes: no-op call, each side
    individually, all sides at once, custom cval, 3-channel input, and the
    "maximum", "constant" and "linear_ramp" pad modes."""
    # -------
    # uint8, int32
    # -------
    for dtype in [np.uint8, np.int32]:
        # no padding arguments -> unchanged array
        arr = np.zeros((3, 3), dtype=dtype) + 255
        arr_pad = ia.pad(arr)
        assert arr_pad.shape == (3, 3)
        assert arr_pad.dtype.type == dtype
        assert np.array_equal(arr_pad, arr)
        # each side individually, default cval 0
        arr_pad = ia.pad(arr, top=1)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[0, :] == 0)
        arr_pad = ia.pad(arr, right=1)
        assert arr_pad.shape == (3, 4)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[:, -1] == 0)
        arr_pad = ia.pad(arr, bottom=1)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[-1, :] == 0)
        arr_pad = ia.pad(arr, left=1)
        assert arr_pad.shape == (3, 4)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[:, 0] == 0)
        # all four sides with different amounts
        arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
        assert arr_pad.shape == (3+(1+3), 3+(2+4))
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[0, :] == 0)
        assert np.all(arr_pad[:, -2:] == 0)
        assert np.all(arr_pad[-3:, :] == 0)
        assert np.all(arr_pad[:, :4] == 0)
        # custom constant value
        arr_pad = ia.pad(arr, top=1, cval=10)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[0, :] == 10)
        # 3-channel input: every channel padded
        arr = np.zeros((3, 3, 3), dtype=dtype) + 128
        arr_pad = ia.pad(arr, top=1)
        assert arr_pad.shape == (4, 3, 3)
        assert arr_pad.dtype.type == dtype
        assert np.all(arr_pad[0, :, 0] == 0)
        assert np.all(arr_pad[0, :, 1] == 0)
        assert np.all(arr_pad[0, :, 2] == 0)
        # mode="maximum": pad with the per-column maximum
        arr = np.zeros((3, 3), dtype=dtype) + 128
        arr[1, 1] = 200
        arr_pad = ia.pad(arr, top=1, mode="maximum")
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert arr_pad[0, 0] == 128
        assert arr_pad[0, 1] == 200
        assert arr_pad[0, 2] == 128
        # mode="constant" with explicit cval
        arr = np.zeros((3, 3), dtype=dtype)
        arr_pad = ia.pad(arr, top=1, mode="constant", cval=123)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert arr_pad[0, 0] == 123
        assert arr_pad[0, 1] == 123
        assert arr_pad[0, 2] == 123
        assert arr_pad[1, 0] == 0
        # mode="linear_ramp": interpolate between cval and the edge value
        arr = np.zeros((1, 1), dtype=dtype) + 100
        arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=200)
        assert arr_pad.shape == (5, 1)
        assert arr_pad.dtype.type == dtype
        assert arr_pad[0, 0] == 200
        assert arr_pad[1, 0] == 175
        assert arr_pad[2, 0] == 150
        assert arr_pad[3, 0] == 125
        assert arr_pad[4, 0] == 100
    # -------
    # float32, float64
    # -------
    for dtype in [np.float32, np.float64]:
        # same scenarios as above, but with float comparisons (allclose /
        # epsilon bounds instead of exact equality)
        arr = np.zeros((3, 3), dtype=dtype) + 1.0
        arr_pad = ia.pad(arr)
        assert arr_pad.shape == (3, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad, arr)
        arr_pad = ia.pad(arr, top=1)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[0, :], dtype([0, 0, 0]))
        arr_pad = ia.pad(arr, right=1)
        assert arr_pad.shape == (3, 4)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[:, -1], dtype([0, 0, 0]))
        arr_pad = ia.pad(arr, bottom=1)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[-1, :], dtype([0, 0, 0]))
        arr_pad = ia.pad(arr, left=1)
        assert arr_pad.shape == (3, 4)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[:, 0], dtype([0, 0, 0]))
        arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
        assert arr_pad.shape == (3+(1+3), 3+(2+4))
        assert arr_pad.dtype.type == dtype
        assert 0 - 1e-6 < np.max(arr_pad[0, :]) < 0 + 1e-6
        assert 0 - 1e-6 < np.max(arr_pad[:, -2:]) < 0 + 1e-6
        # NOTE(review): the int branch checks arr_pad[-3:, :] here; this
        # checks only row -3 — possibly a typo upstream
        assert 0 - 1e-6 < np.max(arr_pad[-3, :]) < 0 + 1e-6
        assert 0 - 1e-6 < np.max(arr_pad[:, :4]) < 0 + 1e-6
        arr_pad = ia.pad(arr, top=1, cval=0.2)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[0, :], dtype([0.2, 0.2, 0.2]))
        arr = np.zeros((3, 3, 3), dtype=dtype) + 0.5
        arr_pad = ia.pad(arr, top=1)
        assert arr_pad.shape == (4, 3, 3)
        assert arr_pad.dtype.type == dtype
        assert np.allclose(arr_pad[0, :, 0], dtype([0, 0, 0]))
        assert np.allclose(arr_pad[0, :, 1], dtype([0, 0, 0]))
        assert np.allclose(arr_pad[0, :, 2], dtype([0, 0, 0]))
        arr = np.zeros((3, 3), dtype=dtype) + 0.5
        arr[1, 1] = 0.75
        arr_pad = ia.pad(arr, top=1, mode="maximum")
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert 0.50 - 1e-6 < arr_pad[0, 0] < 0.50 + 1e-6
        assert 0.75 - 1e-6 < arr_pad[0, 1] < 0.75 + 1e-6
        assert 0.50 - 1e-6 < arr_pad[0, 2] < 0.50 + 1e-6
        arr = np.zeros((3, 3), dtype=dtype)
        arr_pad = ia.pad(arr, top=1, mode="constant", cval=0.4)
        assert arr_pad.shape == (4, 3)
        assert arr_pad.dtype.type == dtype
        assert 0.4 - 1e-6 < arr_pad[0, 0] < 0.4 + 1e-6
        assert 0.4 - 1e-6 < arr_pad[0, 1] < 0.4 + 1e-6
        assert 0.4 - 1e-6 < arr_pad[0, 2] < 0.4 + 1e-6
        assert 0.0 - 1e-6 < arr_pad[1, 0] < 0.0 + 1e-6
        arr = np.zeros((1, 1), dtype=dtype) + 0.6
        arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=1.0)
        assert arr_pad.shape == (5, 1)
        assert arr_pad.dtype.type == dtype
        assert 1.0 - 1e-6 < arr_pad[0, 0] < 1.0 + 1e-6
        assert 0.9 - 1e-6 < arr_pad[1, 0] < 0.9 + 1e-6
        assert 0.8 - 1e-6 < arr_pad[2, 0] < 0.8 + 1e-6
        assert 0.7 - 1e-6 < arr_pad[3, 0] < 0.7 + 1e-6
        assert 0.6 - 1e-6 < arr_pad[4, 0] < 0.6 + 1e-6
def test_compute_paddings_for_aspect_ratio():
    """Check the per-side padding amounts needed to reach a target aspect ratio."""
    # Each case: (input shape, target aspect ratio (w/h),
    #             expected (top, right, bottom, left) paddings).
    cases = [
        ((4, 4), 1.0, (0, 0, 0, 0)),  # already square -> no padding
        ((1, 4), 1.0, (2, 0, 1, 0)),  # too flat -> pad vertically (extra px on top)
        ((4, 1), 1.0, (0, 2, 0, 1)),  # too narrow -> pad horizontally (extra px right)
        ((2, 4), 1.0, (1, 0, 1, 0)),  # even vertical deficit -> symmetric padding
        ((4, 2), 1.0, (0, 1, 0, 1)),  # even horizontal deficit -> symmetric padding
        ((4, 4), 0.5, (2, 0, 2, 0)),  # make image twice as tall as wide
        ((4, 4), 2.0, (0, 2, 0, 2)),  # make image twice as wide as tall
    ]
    for shape, aspect_ratio, expected in cases:
        arr = np.zeros(shape, dtype=np.uint8)
        top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, aspect_ratio)
        assert (top, right, bottom, left) == expected
def test_pad_to_aspect_ratio():
    """Test pad_to_aspect_ratio() over several dtypes, target ratios, pad values
    (cval) and padding modes."""
    for dtype in [np.uint8, np.int32, np.float32]:
        # aspect_ratio = 1.0
        # square input is already at ratio 1.0 -> shape unchanged
        arr = np.zeros((4, 4), dtype=dtype)
        arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
        assert arr_pad.dtype.type == dtype
        assert arr_pad.shape[0] == 4
        assert arr_pad.shape[1] == 4
        # non-square inputs are padded up to a square along the shorter axis
        arr = np.zeros((1, 4), dtype=dtype)
        arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
        assert arr_pad.dtype.type == dtype
        assert arr_pad.shape[0] == 4
        assert arr_pad.shape[1] == 4
        arr = np.zeros((4, 1), dtype=dtype)
        arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
        assert arr_pad.dtype.type == dtype
        assert arr_pad.shape[0] == 4
        assert arr_pad.shape[1] == 4
        arr = np.zeros((2, 4), dtype=dtype)
        arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
        assert arr_pad.dtype.type == dtype
        assert arr_pad.shape[0] == 4
        assert arr_pad.shape[1] == 4
        arr = np.zeros((4, 2), dtype=dtype)
        arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
        assert arr_pad.dtype.type == dtype
        assert arr_pad.shape[0] == 4
        assert arr_pad.shape[1] == 4
        # aspect_ratio != 1.0
        # ratio is width/height: 2.0 widens, 0.5 heightens
        arr = np.zeros((4, 4), dtype=dtype)
        arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
        assert arr_pad.dtype.type == dtype
        assert arr_pad.shape[0] == 4
        assert arr_pad.shape[1] == 8
        arr = np.zeros((4, 4), dtype=dtype)
        arr_pad = ia.pad_to_aspect_ratio(arr, 0.5)
        assert arr_pad.dtype.type == dtype
        assert arr_pad.shape[0] == 8
        assert arr_pad.shape[1] == 4
        # 3d arr
        # channel axis must be preserved untouched
        arr = np.zeros((4, 2, 3), dtype=dtype)
        arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
        assert arr_pad.dtype.type == dtype
        assert arr_pad.shape[0] == 4
        assert arr_pad.shape[1] == 4
        assert arr_pad.shape[2] == 3
    # cval
    # default cval is 0: new columns on both sides are zero-filled
    arr = np.zeros((4, 4), dtype=np.uint8) + 128
    arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
    assert arr_pad.shape[0] == 4
    assert arr_pad.shape[1] == 8
    assert np.max(arr_pad[:, 0:2]) == 0
    assert np.max(arr_pad[:, -2:]) == 0
    assert np.max(arr_pad[:, 2:-2]) == 128
    arr = np.zeros((4, 4), dtype=np.uint8) + 128
    arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=10)
    assert arr_pad.shape[0] == 4
    assert arr_pad.shape[1] == 8
    assert np.max(arr_pad[:, 0:2]) == 10
    assert np.max(arr_pad[:, -2:]) == 10
    assert np.max(arr_pad[:, 2:-2]) == 128
    # float arrays use approximate comparisons with a small tolerance
    arr = np.zeros((4, 4), dtype=np.float32) + 0.5
    arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.0)
    assert arr_pad.shape[0] == 4
    assert arr_pad.shape[1] == 8
    assert 0 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0 + 1e-6
    assert 0 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0 + 1e-6
    assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
    arr = np.zeros((4, 4), dtype=np.float32) + 0.5
    arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.1)
    assert arr_pad.shape[0] == 4
    assert arr_pad.shape[1] == 8
    assert 0.1 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0.1 + 1e-6
    assert 0.1 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0.1 + 1e-6
    assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
    # mode
    # mode="maximum" fills new cells with the per-row/column maximum,
    # so rows crossing the 200-valued center get 200, others 128
    arr = np.zeros((4, 4), dtype=np.uint8) + 128
    arr[1:3, 1:3] = 200
    arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, mode="maximum")
    assert arr_pad.shape[0] == 4
    assert arr_pad.shape[1] == 8
    assert np.max(arr_pad[0:1, 0:2]) == 128
    assert np.max(arr_pad[1:3, 0:2]) == 200
    assert np.max(arr_pad[3:, 0:2]) == 128
    assert np.max(arr_pad[0:1, -2:]) == 128
    assert np.max(arr_pad[1:3, -2:]) == 200
    assert np.max(arr_pad[3:, -2:]) == 128
    # TODO add tests for return_pad_values=True
def test_pool():
    """Test pool() (block-wise reduction of an array): dtype preservation,
    custom reduction functions, 3d inputs, per-axis block sizes and padding
    of non-divisible axes with cval."""
    # basic functionality with uint8, int32, float32
    arr = np.uint8([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    # 2x2 average pooling; integer dtypes keep their dtype (values truncated)
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
    assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
    arr = np.int32([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
    assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
    arr = np.float32([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
    assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
    assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
    assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
    # preserve_dtype off
    # without dtype preservation the result promotes to float64
    arr = np.uint8([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.average, preserve_dtype=False)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == np.float64
    assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
    assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
    assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
    assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
    # maximum function
    arr = np.uint8([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, 2, np.max)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
    assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
    assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
    # 3d array
    # identical channels must pool to identical channels
    arr = np.uint8([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr = np.tile(arr[..., np.newaxis], (1, 1, 3))
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2, 3)
    assert np.array_equal(arr_pooled[..., 0], arr_pooled[..., 1])
    assert np.array_equal(arr_pooled[..., 1], arr_pooled[..., 2])
    arr_pooled = arr_pooled[..., 0]
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
    assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
    # block_size per axis
    # (2, 1) pools pairs of rows but leaves columns untouched
    arr = np.float32([
        [0, 1, 2, 3],
        [4, 5, 6, 7],
        [8, 9, 10, 11],
        [12, 13, 14, 15]
    ])
    arr_pooled = ia.pool(arr, (2, 1), np.average)
    assert arr_pooled.shape == (2, 4)
    assert arr_pooled.dtype == arr.dtype.type
    assert np.allclose(arr_pooled[0, 0], np.average([0, 4]))
    assert np.allclose(arr_pooled[0, 1], np.average([1, 5]))
    assert np.allclose(arr_pooled[0, 2], np.average([2, 6]))
    assert np.allclose(arr_pooled[0, 3], np.average([3, 7]))
    assert np.allclose(arr_pooled[1, 0], np.average([8, 12]))
    assert np.allclose(arr_pooled[1, 1], np.average([9, 13]))
    assert np.allclose(arr_pooled[1, 2], np.average([10, 14]))
    assert np.allclose(arr_pooled[1, 3], np.average([11, 15]))
    # cval
    # axes not divisible by block_size are padded; default cval is 0
    arr = np.uint8([
        [0, 1, 2],
        [4, 5, 6],
        [8, 9, 10]
    ])
    arr_pooled = ia.pool(arr, 2, np.average)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 0, 6, 0]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 0, 0]))
    assert arr_pooled[1, 1] == int(np.average([10, 0, 0, 0]))
    arr = np.uint8([
        [0, 1],
        [4, 5]
    ])
    # block taller than the array -> rows are zero-padded up to the block size
    arr_pooled = ia.pool(arr, (4, 1), np.average)
    assert arr_pooled.shape == (1, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 4, 0, 0]))
    assert arr_pooled[0, 1] == int(np.average([1, 5, 0, 0]))
    arr = np.uint8([
        [0, 1, 2],
        [4, 5, 6],
        [8, 9, 10]
    ])
    # explicit cval changes the padding value used for partial blocks
    arr_pooled = ia.pool(arr, 2, np.average, cval=22)
    assert arr_pooled.shape == (2, 2)
    assert arr_pooled.dtype == arr.dtype.type
    assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
    assert arr_pooled[0, 1] == int(np.average([2, 22, 6, 22]))
    assert arr_pooled[1, 0] == int(np.average([8, 9, 22, 22]))
    assert arr_pooled[1, 1] == int(np.average([10, 22, 22, 22]))
def test_avg_pool():
    """Smoke test for avg_pool(); the pooling core is covered by test_pool()."""
    arr = np.arange(16, dtype=np.uint8).reshape((4, 4))
    pooled = ia.avg_pool(arr, 2)
    assert pooled.shape == (2, 2)
    assert pooled.dtype == arr.dtype.type
    # each output cell is the (truncated) average of one 2x2 input window
    windows = {
        (0, 0): [0, 1, 4, 5],
        (0, 1): [2, 3, 6, 7],
        (1, 0): [8, 9, 12, 13],
        (1, 1): [10, 11, 14, 15],
    }
    for (row, col), values in windows.items():
        assert pooled[row, col] == int(np.average(values))
def test_max_pool():
    """Smoke test for max_pool(); the pooling core is covered by test_pool()."""
    arr = np.arange(16, dtype=np.uint8).reshape((4, 4))
    pooled = ia.max_pool(arr, 2)
    assert pooled.shape == (2, 2)
    assert pooled.dtype == arr.dtype.type
    # each output cell is the maximum of one 2x2 input window
    windows = {
        (0, 0): [0, 1, 4, 5],
        (0, 1): [2, 3, 6, 7],
        (1, 0): [8, 9, 12, 13],
        (1, 1): [10, 11, 14, 15],
    }
    for (row, col), values in windows.items():
        assert pooled[row, col] == int(np.max(values))
def test_draw_grid():
    """Test that draw_grid() tiles images into the expected rows/cols layout,
    including automatic derivation of rows/cols when one or both are None."""
    image = np.zeros((2, 2, 3), dtype=np.uint8)
    image[0, 0] = 64
    image[0, 1] = 128
    image[1, 0] = 192
    # FIX: was 256, which does not fit into uint8 (wrapped to 0 on old numpy,
    # raises OverflowError on numpy >= 2.0); 255 is the uint8 maximum.
    image[1, 1] = 255
    # single image, 1x1 grid -> output is the image itself
    grid = ia.draw_grid([image], rows=1, cols=1)
    assert np.array_equal(grid, image)
    # images may also be given as a single (N,H,W,C) array
    grid = ia.draw_grid(np.uint8([image]), rows=1, cols=1)
    assert np.array_equal(grid, image)
    # 2x2 grid of four identical images
    grid = ia.draw_grid([image, image, image, image], rows=2, cols=2)
    expected = np.vstack([
        np.hstack([image, image]),
        np.hstack([image, image])
    ])
    assert np.array_equal(grid, expected)
    # 1x2 grid
    grid = ia.draw_grid([image, image], rows=1, cols=2)
    expected = np.hstack([image, image])
    assert np.array_equal(grid, expected)
    # rows/cols=None must be derived automatically (2x2 for four images)
    grid = ia.draw_grid([image, image, image, image], rows=2, cols=None)
    expected = np.vstack([
        np.hstack([image, image]),
        np.hstack([image, image])
    ])
    assert np.array_equal(grid, expected)
    grid = ia.draw_grid([image, image, image, image], rows=None, cols=2)
    expected = np.vstack([
        np.hstack([image, image]),
        np.hstack([image, image])
    ])
    assert np.array_equal(grid, expected)
    grid = ia.draw_grid([image, image, image, image], rows=None, cols=None)
    expected = np.vstack([
        np.hstack([image, image]),
        np.hstack([image, image])
    ])
    assert np.array_equal(grid, expected)
def test_Keypoint():
    """Test Keypoint: float/int coordinate access, project(), shift(), repr/str."""
    eps = 1e-8

    # x/y and their rounded integer counterparts
    kp = ia.Keypoint(y=1, x=2)
    assert kp.y == 1
    assert kp.x == 2
    assert kp.y_int == 1
    assert kp.x_int == 2
    kp = ia.Keypoint(y=1.1, x=2.7)
    assert abs(kp.y - 1.1) < eps
    assert abs(kp.x - 2.7) < eps
    # *_int rounds to the nearest integer
    assert kp.y_int == 1
    assert kp.x_int == 3

    # project: coordinates scale per axis with the image-size change
    kp = ia.Keypoint(y=1, x=2)
    for to_shape, (expected_y, expected_x) in [
        ((10, 10), (1, 2)),  # same size -> unchanged
        ((20, 10), (2, 2)),  # height doubled -> y doubled
        ((10, 20), (1, 4)),  # width doubled -> x doubled
        ((20, 20), (2, 4)),  # both doubled
    ]:
        projected = kp.project((10, 10), to_shape)
        assert projected.y == expected_y
        assert projected.x == expected_x

    # shift: offsets are added to the respective coordinate
    kp = ia.Keypoint(y=1, x=2)
    for shift_kwargs, (expected_y, expected_x) in [
        (dict(y=1), (2, 2)),
        (dict(y=-1), (0, 2)),
        (dict(x=1), (1, 3)),
        (dict(x=-1), (1, 1)),
        (dict(y=1, x=2), (2, 4)),
    ]:
        shifted = kp.shift(**shift_kwargs)
        assert shifted.y == expected_y
        assert shifted.x == expected_x

    # __repr__ / __str__ render coordinates with eight decimals
    kp = ia.Keypoint(y=1, x=2)
    assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)"
    kp = ia.Keypoint(y=1.2, x=2.7)
    assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)"
def test_KeypointsOnImage():
    """Test KeypointsOnImage: shape handling, on(), draw_on_image(), shift(),
    conversion to/from coordinate arrays and keypoint images, copy/deepcopy
    and repr/str.

    FIX: replaced the four uses of ``np.bool`` with the builtin ``bool``.
    ``np.bool`` was a deprecated alias for ``bool`` and was removed in
    numpy 1.24, where it raises AttributeError.
    """
    eps = 1e-8
    kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
    # height/width
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3))
    assert kpi.height == 10
    assert kpi.width == 20
    # image instead of shape
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8))
    assert kpi.shape == (10, 20, 3)
    # on()
    # same shape -> keypoints unchanged; doubled shape -> coordinates doubled
    kpi2 = kpi.on((10, 20, 3))
    assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)])
    kpi2 = kpi.on((20, 40, 3))
    assert kpi2.keypoints[0].x == 2
    assert kpi2.keypoints[0].y == 4
    assert kpi2.keypoints[1].x == 6
    assert kpi2.keypoints[1].y == 8
    kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8))
    assert kpi2.keypoints[0].x == 2
    assert kpi2.keypoints[0].y == 4
    assert kpi2.keypoints[1].x == 6
    assert kpi2.keypoints[1].y == 8
    # draw_on_image
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
    kps_mask[2, 1] = 1
    kps_mask[4, 3] = 1
    image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    # size=3 draws a 3x3 square around each keypoint
    image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False)
    kps_mask_size3 = np.copy(kps_mask)
    kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1
    kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1
    assert np.all(image_kps[kps_mask_size3] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10])
    image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [0, 0, 255])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    # scalar color is broadcast over all channels
    image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [255, 255, 255])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    # copy=False draws in place
    image2 = np.copy(image)
    image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False)
    assert np.all(image2 == image_kps)
    assert np.all(image_kps[kps_mask] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    assert np.all(image2[kps_mask] == [0, 255, 0])
    assert np.all(image2[~kps_mask] == [10, 10, 10])
    # out-of-image keypoints are silently skipped unless raise_if_out_of_image
    kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
    image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
    kps_mask[2, 1] = 1
    kps_mask[4, 3] = 1
    image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
    image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    got_exception = False
    try:
        image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
        assert np.all(image_kps[kps_mask] == [0, 255, 0])
        assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    except Exception:
        got_exception = True
    assert got_exception
    # a keypoint at exactly (5, 5) lies just outside a 5x5 image
    kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3))
    image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
    kps_mask = np.zeros(image.shape[0:2], dtype=bool)
    kps_mask[2, 1] = 1
    kps_mask[4, 3] = 1
    image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
    assert np.all(image_kps[kps_mask] == [0, 255, 0])
    assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    got_exception = False
    try:
        image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
        assert np.all(image_kps[kps_mask] == [0, 255, 0])
        assert np.all(image_kps[~kps_mask] == [10, 10, 10])
    except Exception:
        got_exception = True
    assert got_exception
    # shift
    # shifting moves every keypoint by the given x/y offsets
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    kpi2 = kpi.shift(x=0, y=0)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y
    kpi2 = kpi.shift(x=1)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y
    kpi2 = kpi.shift(x=-1)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y
    kpi2 = kpi.shift(y=1)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 1
    kpi2 = kpi.shift(y=-1)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y - 1
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y - 1
    kpi2 = kpi.shift(x=1, y=2)
    assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
    assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 2
    assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
    assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 2
    # get_coords_array
    # conversion to an (N, 2) array of (x, y) rows
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    observed = kpi.get_coords_array()
    expected = np.float32([
        [1, 2],
        [3, 4]
    ])
    assert np.allclose(observed, expected)
    # from_coords_array
    arr = np.float32([
        [1, 2],
        [3, 4]
    ])
    kpi = ia.KeypointsOnImage.from_coords_array(arr, shape=(5, 5, 3))
    assert 1 - eps < kpi.keypoints[0].x < 1 + eps
    assert 2 - eps < kpi.keypoints[0].y < 2 + eps
    assert 3 - eps < kpi.keypoints[1].x < 3 + eps
    assert 4 - eps < kpi.keypoints[1].y < 4 + eps
    # to_keypoint_image
    # one channel per keypoint; 255 at the keypoint, >=128 in its size-3 block
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    image = kpi.to_keypoint_image(size=1)
    image_size3 = kpi.to_keypoint_image(size=3)
    kps_mask = np.zeros((5, 5, 2), dtype=bool)
    kps_mask[2, 1, 0] = 1
    kps_mask[4, 3, 1] = 1
    kps_mask_size3 = np.zeros_like(kps_mask)
    kps_mask_size3[2-1:2+1+1, 1-1:1+1+1, 0] = 1
    kps_mask_size3[4-1:4+1+1, 3-1:3+1+1, 1] = 1
    assert np.all(image[kps_mask] == 255)
    assert np.all(image[~kps_mask] == 0)
    assert np.all(image_size3[kps_mask] == 255)
    assert np.all(image_size3[kps_mask_size3] >= 128)
    assert np.all(image_size3[~kps_mask_size3] == 0)
    # from_keypoint_image()
    kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
    kps_image[2, 1, 0] = 255
    kps_image[4, 3, 1] = 255
    kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, nb_channels=3)
    assert kpi2.shape == (5, 5, 3)
    assert len(kpi2.keypoints) == 2
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[1].y == 4
    assert kpi2.keypoints[1].x == 3
    # channels whose maximum is below threshold use if_not_found_coords (dict form)
    kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
    kps_image[2, 1, 0] = 255
    kps_image[4, 3, 1] = 10
    kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords={"x": -1, "y": -2}, threshold=20,
                                                   nb_channels=3)
    assert kpi2.shape == (5, 5, 3)
    assert len(kpi2.keypoints) == 2
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[1].y == -2
    assert kpi2.keypoints[1].x == -1
    # if_not_found_coords may also be an (x, y) tuple
    kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
    kps_image[2, 1, 0] = 255
    kps_image[4, 3, 1] = 10
    kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=(-1, -2), threshold=20,
                                                   nb_channels=3)
    assert kpi2.shape == (5, 5, 3)
    assert len(kpi2.keypoints) == 2
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[1].y == -2
    assert kpi2.keypoints[1].x == -1
    # if_not_found_coords=None drops keypoints below the threshold
    kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
    kps_image[2, 1, 0] = 255
    kps_image[4, 3, 1] = 10
    kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=None, threshold=20, nb_channels=3)
    assert kpi2.shape == (5, 5, 3)
    assert len(kpi2.keypoints) == 1
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[0].x == 1
    # invalid if_not_found_coords values must raise
    got_exception = False
    try:
        kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
        kps_image[2, 1, 0] = 255
        kps_image[4, 3, 1] = 10
        _ = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords="exception-please", threshold=20,
                                                    nb_channels=3)
    except Exception as exc:
        assert "Expected if_not_found_coords to be" in str(exc)
        got_exception = True
    assert got_exception
    # copy()
    # shallow copy: mutating the original Keypoint objects affects the copy
    kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    kpi2 = kpi.copy()
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[1].x == 3
    assert kpi2.keypoints[1].y == 4
    kps[0].x = 100
    assert kpi2.keypoints[0].x == 100
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[1].x == 3
    assert kpi2.keypoints[1].y == 4
    # deepcopy()
    # deep copy: mutating the original Keypoint objects does NOT affect the copy
    kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    kpi2 = kpi.deepcopy()
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[1].x == 3
    assert kpi2.keypoints[1].y == 4
    kps[0].x = 100
    assert kpi2.keypoints[0].x == 1
    assert kpi2.keypoints[0].y == 2
    assert kpi2.keypoints[1].x == 3
    assert kpi2.keypoints[1].y == 4
    # repr/str
    kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
    kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
    expected = "KeypointsOnImage([Keypoint(x=1.00000000, y=2.00000000), Keypoint(x=3.00000000, y=4.00000000)], " \
               + "shape=(5, 5, 3))"
    assert kpi.__repr__() == kpi.__str__() == expected
def test_BoundingBox():
eps = 1e-8
# properties with ints
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
assert bb.width == 40 - 20
assert bb.height == 30 - 10
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# wrong order of y1/y2, x1/x2
bb = ia.BoundingBox(y1=30, x1=40, y2=10, x2=20, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
# properties with floats
bb = ia.BoundingBox(y1=10.1, x1=20.1, y2=30.9, x2=40.9, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 31
assert bb.x2_int == 41
assert bb.width == 40.9 - 20.1
assert bb.height == 30.9 - 10.1
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# area
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.area == (30-10) * (40-20)
# project
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (10, 10))
assert 10 - eps < bb2.y1 < 10 + eps
assert 20 - eps < bb2.x1 < 20 + eps
assert 30 - eps < bb2.y2 < 30 + eps
assert 40 - eps < bb2.x2 < 40 + eps
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (20, 20))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (5, 5))
assert 10*0.5 - eps < bb2.y1 < 10*0.5 + eps
assert 20*0.5 - eps < bb2.x1 < 20*0.5 + eps
assert 30*0.5 - eps < bb2.y2 < 30*0.5 + eps
assert 40*0.5 - eps < bb2.x2 < 40*0.5 + eps
bb2 = bb.project((10, 10), (10, 20))
assert 10*1 - eps < bb2.y1 < 10*1 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*1 - eps < bb2.y2 < 30*1 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (20, 10))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*1 - eps < bb2.x1 < 20*1 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*1 - eps < bb2.x2 < 40*1 + eps
# extend
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.extend(all_sides=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+1
assert bb2.x1 == 20-1
assert bb2.x2 == 40+1
bb2 = bb.extend(all_sides=-1)
assert bb2.y1 == 10-(-1)
assert bb2.y2 == 30+(-1)
assert bb2.x1 == 20-(-1)
assert bb2.x2 == 40+(-1)
bb2 = bb.extend(top=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(right=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+1
bb2 = bb.extend(bottom=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+1
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(left=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-1
assert bb2.x2 == 40+0
# intersection
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_inter = bb1.intersection(bb2)
assert bb_inter.x1 == 39
assert bb_inter.x2 == 40
assert bb_inter.y1 == 10
assert bb_inter.y2 == 30
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
bb_inter = bb1.intersection(bb2, default=False)
assert bb_inter is False
# union
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_union = bb1.union(bb2)
assert bb_union.x1 == 20
assert bb_union.x2 == 59
assert bb_union.y1 == 10
assert bb_union.y2 == 30
# iou
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
iou = bb1.iou(bb2)
assert 1.0 - eps < iou < 1.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
iou = bb1.iou(bb2)
assert 0.0 - eps < iou < 0.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20, label=None)
bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25, label=None)
iou = bb1.iou(bb2)
area_union = 10 * 10 + 10 * 10 - 5 * 5
area_intersection = 5 * 5
iou_expected = area_intersection / area_union
assert iou_expected - eps < iou < iou_expected + eps
# is_fully_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_fully_within_image((100, 100, 3)) is True
assert bb.is_fully_within_image((20, 100, 3)) is False
assert bb.is_fully_within_image((100, 30, 3)) is False
assert bb.is_fully_within_image((1, 1, 3)) is False
# is_partly_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_partly_within_image((100, 100, 3)) is True
assert bb.is_partly_within_image((20, 100, 3)) is True
assert bb.is_partly_within_image((100, 30, 3)) is True
assert bb.is_partly_within_image((1, 1, 3)) is False
# is_out_of_image()
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=True) is False
assert bb.is_out_of_image((100, 100, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=False) is False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((20, 100, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=False) is True
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((100, 30, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=False) is True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((1, 1, 3), partly=False, fully=True) is True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=False) is False
# cut_out_of_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_cut = bb.cut_out_of_image((100, 100, 3))
eps = np.finfo(np.float32).eps
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image(np.zeros((100, 100, 3), dtype=np.uint8))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((20, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert 20 - 2*eps < bb_cut.y2 < 20
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((100, 30, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert 30 - 2*eps < bb_cut.x2 < 30
# shift
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_top = bb.shift(top=0)
bb_right = bb.shift(right=0)
bb_bottom = bb.shift(bottom=0)
bb_left = bb.shift(left=0)
assert bb_top.y1 == 10
assert bb_top.x1 == 20
assert bb_top.y2 == 30
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20
assert bb_right.y2 == 30
assert bb_right.x2 == 40
assert bb_bottom.y1 == 10
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20
assert bb_left.y2 == 30
assert bb_left.x2 == 40
bb_top = bb.shift(top=1)
bb_right = bb.shift(right=1)
bb_bottom = bb.shift(bottom=1)
bb_left = bb.shift(left=1)
assert bb_top.y1 == 10+1
assert bb_top.x1 == 20
assert bb_top.y2 == 30+1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20-1
assert bb_right.y2 == 30
assert bb_right.x2 == 40-1
assert bb_bottom.y1 == 10-1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30-1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20+1
assert bb_left.y2 == 30
assert bb_left.x2 == 40+1
bb_top = bb.shift(top=-1)
bb_right = bb.shift(right=-1)
bb_bottom = bb.shift(bottom=-1)
bb_left = bb.shift(left=-1)
assert bb_top.y1 == 10-1
assert bb_top.x1 == 20
assert bb_top.y2 == 30-1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20+1
assert bb_right.y2 == 30
assert bb_right.x2 == 40+1
assert bb_bottom.y1 == 10+1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30+1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20-1
assert bb_left.y2 == 30
assert bb_left.x2 == 40-1
bb_mix = bb.shift(top=1, bottom=2, left=3, right=4)
assert bb_mix.y1 == 10+1-2
assert bb_mix.x1 == 20+3-4
assert bb_mix.y2 == 30+3-4
assert bb_mix.x2 == 40+1-2
# draw_on_image()
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[1:3+1, 1] = True
bb_mask[1:3+1, 3] = True
bb_mask[1, 1:3+1] = True
bb_mask[3, 1:3+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image == 0)
image_bb = bb.draw_on_image(image, color=[255, 0, 0], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 0, 0])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image, color=128, alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [128, 128, 128])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image+100, color=[200, 200, 200], alpha=0.5, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [150, 150, 150])
assert np.all(image_bb[~bb_mask] == [100, 100, 100])
image_bb = bb.draw_on_image((image+100).astype(np.float32), color=[200, 200, 200], alpha=0.5, thickness=1,
copy=True, raise_if_out_of_image=False)
assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1
assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=False,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image[bb_mask] == [255, 255, 255])
assert np.all(image[~bb_mask] == [0, 0, 0])
image = np.zeros_like(image)
bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[2, 0:3] = True
bb_mask[0:3, 2] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:5, 0:5] = True
bb_mask[2, 2] = False
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=2, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool)
bb_mask[0:1+1, 1] = True
bb_mask[1, 0:1+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
got_exception = False
try:
_ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=True)
except Exception:
got_exception = True
assert got_exception is False
bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1, label=None)
got_exception = False
try:
_ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=True)
except Exception:
got_exception = True
assert got_exception is True
# extract_from_image()
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((0, 1), (0, 1), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[8:11, 8:11, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((1, 0), (1, 0), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[0:4, 0:5, :])
# to_keypoints()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
kps = bb.to_keypoints()
assert kps[0].y == 1
assert kps[0].x == 1
assert kps[1].y == 1
assert kps[1].x == 3
assert kps[2].y == 3
assert kps[2].x == 3
assert kps[3].y == 3
assert kps[3].x == 1
# copy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.copy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label == "test"
bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2")
assert bb2.y1 == 10
assert bb2.x1 == 20
assert bb2.y2 == 30
assert bb2.x2 == 40
assert bb2.label == "test2"
# deepcopy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"])
bb2 = bb.deepcopy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label[0] == "test"
# BoundingBox_repr()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__repr__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
# test_BoundingBox_str()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__str__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
def test_BoundingBoxesOnImage():
    """Test BoundingBoxesOnImage: size properties, projection via on(),
    drawing, out-of-image filtering/clipping, shifting and copy semantics.

    Fix: the shift() section previously asserted ``len(bbsoi_cut.bounding_boxes)``
    (a leftover from the cut section) instead of the shifted result.
    """
    reseed()

    # height/width are taken from the shape tuple
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    assert bbsoi.height == 40
    assert bbsoi.width == 50

    # ... and equally from an image array given as shape
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
    assert bbsoi.height == 40
    assert bbsoi.width == 50

    # on(): projecting onto an image of identical size keeps coordinates
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
    bbsoi_projected = bbsoi.on((40, 50))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10
    assert bbsoi_projected.bounding_boxes[0].x1 == 20
    assert bbsoi_projected.bounding_boxes[0].y2 == 30
    assert bbsoi_projected.bounding_boxes[0].x2 == 40
    assert bbsoi_projected.bounding_boxes[1].y1 == 15
    assert bbsoi_projected.bounding_boxes[1].x1 == 25
    assert bbsoi_projected.bounding_boxes[1].y2 == 35
    assert bbsoi_projected.bounding_boxes[1].x2 == 45
    # projecting onto a 2x-sized image doubles all coordinates
    bbsoi_projected = bbsoi.on((40*2, 50*2, 3))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
    assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
    assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
    assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
    assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
    assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
    assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
    assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
    # same, with the target given as an array instead of a shape tuple
    bbsoi_projected = bbsoi.on(np.zeros((40*2, 50*2, 3), dtype=np.uint8))
    assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
    assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
    assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
    assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
    assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
    assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
    assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
    assert bbsoi_projected.bounding_boxes[1].x2 == 45*2

    # draw_on_image(): corners of both boxes must be drawn, their diagonal
    # neighbours outside the rectangle outline must stay black
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    image = bbsoi.draw_on_image(np.zeros(bbsoi.shape, dtype=np.uint8), color=[0, 255, 0], alpha=1.0, thickness=1,
                                copy=True, raise_if_out_of_image=False)
    assert np.all(image[10-1, 20-1, :] == [0, 0, 0])
    assert np.all(image[10-1, 20-0, :] == [0, 0, 0])
    assert np.all(image[10-0, 20-1, :] == [0, 0, 0])
    assert np.all(image[10-0, 20-0, :] == [0, 255, 0])
    assert np.all(image[10+1, 20+1, :] == [0, 0, 0])
    assert np.all(image[30-1, 40-1, :] == [0, 0, 0])
    assert np.all(image[30+1, 40-0, :] == [0, 0, 0])
    assert np.all(image[30+0, 40+1, :] == [0, 0, 0])
    assert np.all(image[30+0, 40+0, :] == [0, 255, 0])
    assert np.all(image[30+1, 40+1, :] == [0, 0, 0])
    assert np.all(image[15-1, 25-1, :] == [0, 0, 0])
    assert np.all(image[15-1, 25-0, :] == [0, 0, 0])
    assert np.all(image[15-0, 25-1, :] == [0, 0, 0])
    assert np.all(image[15-0, 25-0, :] == [0, 255, 0])
    assert np.all(image[15+1, 25+1, :] == [0, 0, 0])
    assert np.all(image[35-1, 45-1, :] == [0, 0, 0])
    assert np.all(image[35+1, 45+0, :] == [0, 0, 0])
    assert np.all(image[35+0, 45+1, :] == [0, 0, 0])
    assert np.all(image[35+0, 45+0, :] == [0, 255, 0])
    assert np.all(image[35+1, 45+1, :] == [0, 0, 0])

    # remove_out_of_image(): bb2 (x2=51) sticks out of the 50px wide image
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_slim = bbsoi.remove_out_of_image(fully=True, partly=True)
    assert len(bbsoi_slim.bounding_boxes) == 1
    assert bbsoi_slim.bounding_boxes[0] == bb1

    # cut_out_of_image(): bb2's x2 is clipped to just below the image width
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    eps = np.finfo(np.float32).eps
    bbsoi_cut = bbsoi.cut_out_of_image()
    assert len(bbsoi_cut.bounding_boxes) == 2
    assert bbsoi_cut.bounding_boxes[0].y1 == 10
    assert bbsoi_cut.bounding_boxes[0].x1 == 20
    assert bbsoi_cut.bounding_boxes[0].y2 == 30
    assert bbsoi_cut.bounding_boxes[0].x2 == 40
    assert bbsoi_cut.bounding_boxes[1].y1 == 15
    assert bbsoi_cut.bounding_boxes[1].x1 == 25
    assert bbsoi_cut.bounding_boxes[1].y2 == 35
    assert 50 - 2*eps < bbsoi_cut.bounding_boxes[1].x2 < 50

    # shift(): right=1 moves all boxes one pixel to the left
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_shifted = bbsoi.shift(right=1)
    # BUGFIX: previously checked bbsoi_cut here (copy-paste from the section above)
    assert len(bbsoi_shifted.bounding_boxes) == 2
    assert bbsoi_shifted.bounding_boxes[0].y1 == 10
    assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1
    assert bbsoi_shifted.bounding_boxes[0].y2 == 30
    assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1
    assert bbsoi_shifted.bounding_boxes[1].y1 == 15
    assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1
    assert bbsoi_shifted.bounding_boxes[1].y2 == 35
    assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1

    # copy(): shallow copy, the BoundingBox objects are shared
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_copy = bbsoi.copy()
    assert len(bbsoi.bounding_boxes) == 2
    assert bbsoi_copy.bounding_boxes[0].y1 == 10
    assert bbsoi_copy.bounding_boxes[0].x1 == 20
    assert bbsoi_copy.bounding_boxes[0].y2 == 30
    assert bbsoi_copy.bounding_boxes[0].x2 == 40
    assert bbsoi_copy.bounding_boxes[1].y1 == 15
    assert bbsoi_copy.bounding_boxes[1].x1 == 25
    assert bbsoi_copy.bounding_boxes[1].y2 == 35
    assert bbsoi_copy.bounding_boxes[1].x2 == 51
    # mutating the original is visible in the shallow copy
    bbsoi.bounding_boxes[0].y1 = 0
    assert bbsoi_copy.bounding_boxes[0].y1 == 0

    # deepcopy(): mutating the original must NOT affect the deep copy
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bbsoi_copy = bbsoi.deepcopy()
    assert len(bbsoi.bounding_boxes) == 2
    assert bbsoi_copy.bounding_boxes[0].y1 == 10
    assert bbsoi_copy.bounding_boxes[0].x1 == 20
    assert bbsoi_copy.bounding_boxes[0].y2 == 30
    assert bbsoi_copy.bounding_boxes[0].x2 == 40
    assert bbsoi_copy.bounding_boxes[1].y1 == 15
    assert bbsoi_copy.bounding_boxes[1].x1 == 25
    assert bbsoi_copy.bounding_boxes[1].y2 == 35
    assert bbsoi_copy.bounding_boxes[1].x2 == 51
    bbsoi.bounding_boxes[0].y1 = 0
    assert bbsoi_copy.bounding_boxes[0].y1 == 10

    # repr() / str()
    bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
    bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
    bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
    bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, x2=40.0000, y2=30.0000, label=None)"
    bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, x2=51.0000, y2=35.0000, label=None)"
    expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (bb1_expected, bb2_expected)
    assert bbsoi.__repr__() == bbsoi.__str__() == expected
def test_HeatmapsOnImage_draw():
    """Test HeatmapsOnImage.draw() on a symmetric map and with upscaled output."""
    arr = np.float32([
        [0.5, 0.0, 0.0, 0.5],
        [0.0, 1.0, 1.0, 0.0],
        [0.0, 1.0, 1.0, 0.0],
        [0.5, 0.0, 0.0, 0.5],
    ])
    drawn = ia.HeatmapsOnImage(arr, shape=(4, 4, 3)).draw()[0]
    assert drawn.shape == (4, 4, 3)
    # cells carrying the same heatmap value must render to the same color
    color_low = drawn[0, 1]    # rendering of value 0.0
    color_mid = drawn[0, 0]    # rendering of value 0.5
    color_high = drawn[1, 1]   # rendering of value 1.0
    groups = [
        (color_low, [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1), (3, 2)]),
        (color_mid, [(0, 0), (0, 3), (3, 0), (3, 3)]),
        (color_high, [(1, 1), (1, 2), (2, 1), (2, 2)]),
    ]
    for color, coords in groups:
        for y, x in coords:
            assert np.allclose(drawn[y, x], color)

    # requested drawing size may differ from the heatmap array size
    arr = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    drawn = ia.HeatmapsOnImage(arr, shape=(2, 2, 3)).draw(size=(4, 4))[0]
    assert drawn.shape == (4, 4, 3)
    color_left = drawn[0, 0]
    color_right = drawn[0, -1]
    for y in range(4):
        for x in range(4):
            # left half stems from value 0.0, right half from value 1.0
            assert np.allclose(drawn[y, x], color_left if x < 2 else color_right)
def test_HeatmapsOnImage_draw_on_image():
    """Test HeatmapsOnImage.draw_on_image() blending, with heatmap- and image-sized output."""
    arr = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    heatmaps = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))

    # default: heatmap is resized to the image before blending
    base = np.uint8([
        [0, 0, 0, 255],
        [0, 0, 0, 255],
        [0, 0, 0, 255],
        [0, 0, 0, 255]
    ])
    base = np.tile(base[..., np.newaxis], (1, 1, 3))
    blended = heatmaps.draw_on_image(base, alpha=0.5, cmap=None)[0]
    assert blended.shape == (4, 4, 3)
    assert np.all(blended[0:4, 0:2, :] == 0)
    # 0.5 * 0 + 0.5 * 255 -> 127/128 depending on rounding
    assert np.all(blended[0:4, 2:3, :] == 128) or np.all(blended[0:4, 2:3, :] == 127)
    assert np.all(blended[0:4, 3:4, :] == 255) or np.all(blended[0:4, 3:4, :] == 254)

    # resize="image": the image is resized down to the heatmap instead
    base = np.uint8([
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0],
        [0, 0, 0, 0]
    ])
    base = np.tile(base[..., np.newaxis], (1, 1, 3))
    blended = heatmaps.draw_on_image(base, alpha=0.5, resize="image", cmap=None)[0]
    assert blended.shape == (2, 2, 3)
    assert np.all(blended[0:2, 0, :] == 0)
    assert np.all(blended[0:2, 1, :] == 128) or np.all(blended[0:2, 1, :] == 127)
def test_HeatmapsOnImage_invert():
    """Test HeatmapsOnImage.invert() for 2D (H, W) and 3D (H, W, 1) inputs."""
    arr = np.float32([
        [0.0, 5.0, 10.0],
        [-1.0, -2.0, 7.5]
    ])
    # inversion mirrors each value within [min_value, max_value]
    arr_inverted = np.float32([
        [8.0, 3.0, -2.0],
        [9.0, 10.0, 0.5]
    ])

    # input of shape (H, W)
    heatmaps = ia.HeatmapsOnImage(arr, shape=(2, 3), min_value=-2.0, max_value=10.0)
    assert np.allclose(heatmaps.get_arr(), arr)
    assert np.allclose(heatmaps.invert().get_arr(), arr_inverted)

    # input of shape (H, W, 1)
    heatmaps = ia.HeatmapsOnImage(arr[..., np.newaxis], shape=(2, 3), min_value=-2.0, max_value=10.0)
    assert np.allclose(heatmaps.get_arr(), arr[..., np.newaxis])
    assert np.allclose(heatmaps.invert().get_arr(), arr_inverted[..., np.newaxis])
def test_HeatmapsOnImage_pad():
    """Test HeatmapsOnImage.pad() with default (zero), constant-value and edge padding."""
    heatmaps_arr = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
    # default padding fills with zeros; output grows by top+bottom / left+right
    heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4)
    assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
    assert np.allclose(
        heatmaps_padded.arr_0to1[:, :, 0],
        np.float32([
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        ])
    )
    # cval sets the constant fill value used for the padded border
    heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, cval=0.5)
    assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
    assert np.allclose(
        heatmaps_padded.arr_0to1[:, :, 0],
        np.float32([
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
            [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
        ])
    )
    # mode="edge" replicates the border values outward
    heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, mode="edge")
    assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
    assert np.allclose(
        heatmaps_padded.arr_0to1[:, :, 0],
        np.float32([
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]
        ])
    )
def test_HeatmapsOnImage_avg_pool():
    """Test HeatmapsOnImage.avg_pool() with a 2x2 kernel."""
    arr = np.float32([
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0]
    ])
    pooled = ia.HeatmapsOnImage(arr, shape=(4, 4, 3)).avg_pool(2)
    assert pooled.arr_0to1.shape == (2, 2, 1)
    # each output cell is the mean of its 2x2 input block
    expected = np.float32([[0.0, 0.75],
                           [0.0, 0.75]])
    assert np.allclose(pooled.arr_0to1[:, :, 0], expected)
def test_HeatmapsOnImage_max_pool():
    """Test HeatmapsOnImage.max_pool() with a 2x2 kernel."""
    arr = np.float32([
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0],
        [0.0, 0.0, 0.5, 1.0]
    ])
    pooled = ia.HeatmapsOnImage(arr, shape=(4, 4, 3)).max_pool(2)
    assert pooled.arr_0to1.shape == (2, 2, 1)
    # each output cell is the maximum of its 2x2 input block
    expected = np.float32([[0.0, 1.0],
                           [0.0, 1.0]])
    assert np.allclose(pooled.arr_0to1[:, :, 0], expected)
def test_HeatmapsOnImage_scale():
    """Test HeatmapsOnImage.scale() with a target shape and a relative factor."""
    arr = np.float32([
        [0.0, 1.0]
    ])

    # scale to an explicit output shape
    scaled = ia.HeatmapsOnImage(arr, shape=(4, 4, 3)).scale((4, 4), interpolation="nearest")
    assert scaled.arr_0to1.shape == (4, 4, 1)
    assert scaled.arr_0to1.dtype.type == np.float32
    expected = np.float32([
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0]
    ])
    assert np.allclose(scaled.arr_0to1[:, :, 0], expected)

    # scale by a relative factor (1x2 -> 2x4)
    arr = np.float32([
        [0.0, 1.0]
    ])
    scaled = ia.HeatmapsOnImage(arr, shape=(4, 4, 3)).scale(2.0, interpolation="nearest")
    assert scaled.arr_0to1.shape == (2, 4, 1)
    assert scaled.arr_0to1.dtype.type == np.float32
    expected = np.float32([
        [0.0, 0.0, 1.0, 1.0],
        [0.0, 0.0, 1.0, 1.0]
    ])
    assert np.allclose(scaled.arr_0to1[:, :, 0], expected)
def test_SegmentationMapOnImage_bool():
    """Test that boolean mask inputs into SegmentationMapOnImage work (issue #189).

    Checked once with the builtin ``bool`` dtype and once with numpy's bool
    scalar type. Fix: the second case used the alias ``np.bool``, which was
    deprecated in NumPy 1.20 and removed in 1.24; ``np.bool_`` is the
    supported spelling and yields the same dtype.
    """
    arr = np.array([
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0]
    ], dtype=bool)
    assert arr.dtype.type == np.bool_
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
    observed = segmap.get_arr_int()
    assert observed.dtype.type == np.int32
    assert np.array_equal(arr, observed)
    # same mask, constructed via numpy's bool scalar type
    arr = np.array([
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0]
    ], dtype=np.bool_)
    assert arr.dtype.type == np.bool_
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
    observed = segmap.get_arr_int()
    assert observed.dtype.type == np.int32
    assert np.array_equal(arr, observed)
def test_SegmentationMapOnImage_get_arr_int():
    """Test SegmentationMapOnImage.get_arr_int() for int input and for
    per-class float-channel input, incl. background thresholding."""
    # int input: returned unchanged, as int32
    arr = np.int32([
        [0, 0, 1],
        [0, 2, 1],
        [1, 3, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=4)
    observed = segmap.get_arr_int()
    assert observed.dtype.type == np.int32
    assert np.array_equal(arr, observed)
    # float input: one channel per class (channel i -> class id i+1)
    arr_c0 = np.float32([
        [0.1, 0.1, 0.1],
        [0.1, 0.9, 0.1],
        [0.0, 0.1, 0.0]
    ])
    arr_c1 = np.float32([
        [0.2, 1.0, 0.2],
        [0.2, 0.8, 0.2],
        [0.0, 0.0, 0.0]
    ])
    arr_c2 = np.float32([
        [0.0, 0.0, 0.0],
        [0.3, 0.7, 0.3],
        [0.1, 0.0, 0.0001]
    ])
    arr = np.concatenate([
        arr_c0[..., np.newaxis],
        arr_c1[..., np.newaxis],
        arr_c2[..., np.newaxis]
    ], axis=2)
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
    observed = segmap.get_arr_int()
    # per pixel: argmax over the channels decides the class; the bottom-right
    # pixel falls to background (0) despite a tiny nonzero value
    expected = np.int32([
        [2, 2, 2],
        [3, 1, 3],
        [3, 1, 0]
    ])
    assert observed.dtype.type == np.int32
    assert np.array_equal(observed, expected)
    # changing background_class_id is rejected for this kind of input;
    # the error message is checked below
    got_exception = False
    try:
        _ = segmap.get_arr_int(background_class_id=2)
    except Exception as exc:
        assert "The background class id may only be changed if " in str(exc)
        got_exception = True
    assert got_exception
    # with a background_threshold, pixels whose best channel value is below
    # the threshold are mapped to background (0)
    observed = segmap.get_arr_int(background_threshold=0.21)
    expected = np.int32([
        [0, 2, 0],
        [3, 1, 3],
        [0, 0, 0]
    ])
    assert observed.dtype.type == np.int32
    assert np.array_equal(observed, expected)
def test_SegmentationMapOnImage_draw():
    """Test SegmentationMapOnImage.draw(): default colors, resizing, custom
    colors and background thresholding with a foreground mask.

    Fix: replaced ``dtype=np.bool`` (alias deprecated in NumPy 1.20,
    removed in 1.24) with the builtin ``bool``.
    """
    arr = np.int32([
        [0, 1, 1],
        [0, 1, 1],
        [0, 1, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
    # simple example with 2 classes
    observed = segmap.draw()
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    assert np.array_equal(observed, expected)
    # same example, with resizing to 2x the size
    observed = segmap.draw(size=(6, 6))
    expected = ia.imresize_single_image(expected, (6, 6), interpolation="nearest")
    assert np.array_equal(observed, expected)
    # custom choice of colors
    col0 = (10, 10, 10)
    col1 = (50, 51, 52)
    observed = segmap.draw(colors=[col0, col1])
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    assert np.array_equal(observed, expected)
    # background_threshold, background_class and foreground mask
    arr_c0 = np.float32([
        [0, 0, 0],
        [1.0, 0, 0],
        [0, 0, 0]
    ])
    arr_c1 = np.float32([
        [0, 1, 1],
        [0, 1, 1],
        [0.1, 1, 1]
    ])
    arr = np.concatenate([
        arr_c0[..., np.newaxis],
        arr_c1[..., np.newaxis]
    ], axis=2)
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
    observed, observed_fg = segmap.draw(background_threshold=0.01, return_foreground_mask=True)
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2]
    expected = np.uint8([
        [col0, col2, col2],
        [col1, col2, col2],
        [col2, col2, col2]
    ])
    expected_fg = np.array([
        [False, True, True],
        [True, True, True],
        [True, True, True]
    ], dtype=bool)
    assert np.array_equal(observed, expected)
    assert np.array_equal(observed_fg, expected_fg)
    # background_threshold, background_class and foreground mask
    # here with higher threshold so that bottom left pixel switches to background
    observed, observed_fg = segmap.draw(background_threshold=0.11, return_foreground_mask=True)
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2]
    expected = np.uint8([
        [col0, col2, col2],
        [col1, col2, col2],
        [col0, col2, col2]
    ])
    expected_fg = np.array([
        [False, True, True],
        [True, True, True],
        [False, True, True]
    ], dtype=bool)
    assert np.array_equal(observed, expected)
    assert np.array_equal(observed_fg, expected_fg)
def test_SegmentationMapOnImage_draw_on_image():
    """Test SegmentationMapOnImage.draw_on_image(): alpha blending with and
    without background drawing, and resizing in both directions."""
    arr = np.int32([
        [0, 1, 1],
        [0, 1, 1],
        [0, 1, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
    image = np.uint8([
        [0, 10, 20],
        [30, 40, 50],
        [60, 70, 80]
    ])
    image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
    # only image visible
    observed = segmap.draw_on_image(image, alpha=0)
    assert np.array_equal(observed, image)
    # only segmap visible
    observed = segmap.draw_on_image(image, alpha=1.0, draw_background=True)
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    assert np.array_equal(observed, expected)
    # only segmap visible - in foreground
    # (background pixels keep the original image values)
    observed = segmap.draw_on_image(image, alpha=1.0, draw_background=False)
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    expected = np.uint8([
        [image[0, 0, :], col1, col1],
        [image[1, 0, :], col1, col1],
        [image[2, 0, :], col1, col1]
    ])
    assert np.array_equal(observed, expected)
    # overlay without background drawn: foreground pixels are alpha-blended
    a1 = 0.7
    a0 = 1.0 - a1
    observed = segmap.draw_on_image(image, alpha=a1, draw_background=False)
    col1 = np.uint8(ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1])
    expected = np.float32([
        [image[0, 0, :], a0*image[0, 1, :] + a1*col1, a0*image[0, 2, :] + a1*col1],
        [image[1, 0, :], a0*image[1, 1, :] + a1*col1, a0*image[1, 2, :] + a1*col1],
        [image[2, 0, :], a0*image[2, 1, :] + a1*col1, a0*image[2, 2, :] + a1*col1]
    ])
    # allow up to ~1 intensity step of rounding error per pixel
    d_max = np.max(np.abs(observed.astype(np.float32) - expected))
    assert observed.shape == expected.shape
    assert d_max <= 1.0 + 1e-4
    # overlay with background drawn: every pixel is alpha-blended
    a1 = 0.7
    a0 = 1.0 - a1
    observed = segmap.draw_on_image(image, alpha=a1, draw_background=True)
    col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
    col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    expected = a0 * image + a1 * expected
    d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
    assert observed.shape == expected.shape
    assert d_max <= 1.0 + 1e-4
    # resizing of segmap to image
    # (col0/col1 below intentionally reuse the default colors set above)
    arr = np.int32([
        [0, 1, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
    image = np.uint8([
        [0, 10, 20],
        [30, 40, 50],
        [60, 70, 80]
    ])
    image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
    a1 = 0.7
    a0 = 1.0 - a1
    observed = segmap.draw_on_image(image, alpha=a1, draw_background=True, resize="segmentation_map")
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    expected = a0 * image + a1 * expected
    d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
    assert observed.shape == expected.shape
    assert d_max <= 1.0 + 1e-4
    # resizing of image to segmap
    arr = np.int32([
        [0, 1, 1],
        [0, 1, 1],
        [0, 1, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(1, 3), nb_classes=2)
    image = np.uint8([
        [0, 10, 20]
    ])
    image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
    # reference: the image upscaled to the segmap's array size
    image_rs = ia.imresize_single_image(image, arr.shape[0:2], interpolation="cubic")
    a1 = 0.7
    a0 = 1.0 - a1
    observed = segmap.draw_on_image(image, alpha=a1, draw_background=True, resize="image")
    expected = np.uint8([
        [col0, col1, col1],
        [col0, col1, col1],
        [col0, col1, col1]
    ])
    expected = a0 * image_rs + a1 * expected
    d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
    assert observed.shape == expected.shape
    assert d_max <= 1.0 + 1e-4
def test_SegmentationMapOnImage_pad():
    """Test SegmentationMapOnImage.pad() with default, cval and edge modes."""
    arr = np.int32([
        [0, 1, 1],
        [0, 2, 1],
        [0, 1, 3]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=4)
    # np.pad width spec matching top=1, right=2, bottom=3, left=4
    pad_widths = ((1, 3), (4, 2), (0, 0))

    # default: zero padding
    segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4)
    expected = np.pad(segmap.arr, pad_widths, mode="constant", constant_values=0)
    assert np.allclose(segmap_padded.arr, expected)

    # explicit constant fill value
    segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4, cval=1.0)
    expected = np.pad(segmap.arr, pad_widths, mode="constant", constant_values=1.0)
    assert np.allclose(segmap_padded.arr, expected)

    # edge replication
    segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4, mode="edge")
    expected = np.pad(segmap.arr, pad_widths, mode="edge")
    assert np.allclose(segmap_padded.arr, expected)
def test_SegmentationMapOnImage_pad_to_aspect_ratio():
    """Test SegmentationMapOnImage.pad_to_aspect_ratio() incl. modes and
    returned pad amounts."""
    arr = np.int32([
        [0, 1, 1],
        [0, 2, 1]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 3), nb_classes=3)

    # ratio 1.0 on a 2x3 map: one extra row at the top
    one_top = ((1, 0), (0, 0), (0, 0))
    segmap_padded = segmap.pad_to_aspect_ratio(1.0)
    assert np.allclose(segmap_padded.arr, np.pad(segmap.arr, one_top, mode="constant", constant_values=0))

    segmap_padded = segmap.pad_to_aspect_ratio(1.0, cval=1.0)
    assert np.allclose(segmap_padded.arr, np.pad(segmap.arr, one_top, mode="constant", constant_values=1.0))

    segmap_padded = segmap.pad_to_aspect_ratio(1.0, mode="edge")
    assert np.allclose(segmap_padded.arr, np.pad(segmap.arr, one_top, mode="edge"))

    # ratio 0.5 (H = 2*W): two extra rows at top and bottom each
    two_both = ((2, 2), (0, 0), (0, 0))
    segmap_padded = segmap.pad_to_aspect_ratio(0.5)
    assert np.allclose(segmap_padded.arr, np.pad(segmap.arr, two_both, mode="constant", constant_values=0))

    # return_pad_amounts=True additionally yields (top, right, bottom, left)
    segmap_padded, pad_amounts = segmap.pad_to_aspect_ratio(0.5, return_pad_amounts=True)
    assert np.allclose(segmap_padded.arr, np.pad(segmap.arr, two_both, mode="constant", constant_values=0))
    assert pad_amounts == (2, 0, 2, 0)
def test_SegmentationMapOnImage_scale():
    """Test SegmentationMapOnImage.scale() with a target shape (default and
    nearest interpolation) and with a relative factor."""
    arr = np.int32([
        [0, 1],
        [0, 2]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    # scale to explicit shape; default (cubic) interpolation is clipped to [0, 1]
    segmap_scaled = segmap.scale((4, 4))
    observed = segmap_scaled.arr
    expected = np.clip(ia.imresize_single_image(segmap.arr, (4, 4), interpolation="cubic"), 0, 1.0)
    assert np.allclose(observed, expected)
    assert np.array_equal(segmap_scaled.get_arr_int(), np.int32([
        [0, 0, 1, 1],
        [0, 0, 1, 1],
        [0, 0, 2, 2],
        [0, 0, 2, 2],
    ]))
    # nearest-neighbour interpolation: no clipping needed
    segmap_scaled = segmap.scale((4, 4), interpolation="nearest")
    observed = segmap_scaled.arr
    expected = ia.imresize_single_image(segmap.arr, (4, 4), interpolation="nearest")
    assert np.allclose(observed, expected)
    assert np.array_equal(segmap_scaled.get_arr_int(), np.int32([
        [0, 0, 1, 1],
        [0, 0, 1, 1],
        [0, 0, 2, 2],
        [0, 0, 2, 2],
    ]))
    # scale by a relative factor
    segmap_scaled = segmap.scale(2.0)
    observed = segmap_scaled.arr
    expected = np.clip(ia.imresize_single_image(segmap.arr, 2.0, interpolation="cubic"), 0, 1.0)
    assert np.allclose(observed, expected)
    assert np.array_equal(segmap_scaled.get_arr_int(), np.int32([
        [0, 0, 1, 1],
        [0, 0, 1, 1],
        [0, 0, 2, 2],
        [0, 0, 2, 2],
    ]))
def test_SegmentationMapOnImage_to_heatmaps():
    """Test SegmentationMapOnImage.to_heatmaps(), incl. the only_nonempty and
    not_none_if_no_nonempty options.

    Fix: the class-index membership checks previously asserted a non-empty
    list literal (``assert [idx in ... for idx in ...]``), which is always
    truthy and therefore never tested anything; they now use ``all(...)``.
    """
    arr = np.int32([
        [0, 1],
        [0, 2]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    heatmaps = segmap.to_heatmaps()
    # one channel per class, one-hot per pixel
    expected_c0 = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    expected_c1 = np.float32([
        [0.0, 1.0],
        [0.0, 0.0]
    ])
    expected_c2 = np.float32([
        [0.0, 0.0],
        [0.0, 1.0]
    ])
    expected = np.concatenate([
        expected_c0[..., np.newaxis],
        expected_c1[..., np.newaxis],
        expected_c2[..., np.newaxis]
    ], axis=2)
    assert np.allclose(heatmaps.arr_0to1, expected)
    # only_nonempty when all are nonempty
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
    expected_c0 = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    expected_c1 = np.float32([
        [0.0, 1.0],
        [0.0, 0.0]
    ])
    expected_c2 = np.float32([
        [0.0, 0.0],
        [0.0, 1.0]
    ])
    expected = np.concatenate([
        expected_c0[..., np.newaxis],
        expected_c1[..., np.newaxis],
        expected_c2[..., np.newaxis]
    ], axis=2)
    assert np.allclose(heatmaps.arr_0to1, expected)
    assert len(class_indices) == 3
    assert all(idx in class_indices for idx in [0, 1, 2])
    # only_nonempty when one is empty and two are nonempty
    arr = np.int32([
        [0, 2],
        [0, 2]
    ])
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
    expected_c0 = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    expected_c2 = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    expected = np.concatenate([
        expected_c0[..., np.newaxis],
        expected_c2[..., np.newaxis]
    ], axis=2)
    assert np.allclose(heatmaps.arr_0to1, expected)
    assert len(class_indices) == 2
    assert all(idx in class_indices for idx in [0, 2])
    # only_nonempty when all are empty
    arr_c0 = np.float32([
        [0.0, 0.0],
        [0.0, 0.0]
    ])
    arr = arr_c0[..., np.newaxis]
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
    assert heatmaps is None
    assert len(class_indices) == 0
    # only_nonempty when all are empty and not_none_if_no_nonempty is True
    arr_c0 = np.float32([
        [0.0, 0.0],
        [0.0, 0.0]
    ])
    arr = arr_c0[..., np.newaxis]
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
    heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True, not_none_if_no_nonempty=True)
    assert np.allclose(heatmaps.arr_0to1, np.zeros((2, 2), dtype=np.float32))
    assert len(class_indices) == 1
    assert all(idx in class_indices for idx in [0])
def test_SegmentationMapOnImage_from_heatmaps():
    """Test SegmentationMapOnImage.from_heatmaps() with and without
    explicit class_indices."""
    channel_a = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    channel_b = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    stacked = np.concatenate([channel_a[..., np.newaxis], channel_b[..., np.newaxis]], axis=2)

    # without class_indices the channels map 1:1 onto classes
    heatmaps = ia.HeatmapsOnImage.from_0to1(stacked, shape=(2, 2))
    segmap = ia.SegmentationMapOnImage.from_heatmaps(heatmaps)
    assert np.allclose(segmap.arr, stacked)

    # with class_indices: absent classes get all-zero channels
    heatmaps = ia.HeatmapsOnImage.from_0to1(stacked, shape=(2, 2))
    segmap = ia.SegmentationMapOnImage.from_heatmaps(heatmaps, class_indices=[0, 2], nb_classes=4)
    zeros = np.zeros(channel_a.shape)
    expected = np.concatenate([
        np.copy(channel_a)[..., np.newaxis],
        zeros[..., np.newaxis],
        np.copy(channel_b)[..., np.newaxis],
        zeros[..., np.newaxis]
    ], axis=2)
    assert np.allclose(segmap.arr, expected)
def test_SegmentationMapOnImage_copy():
    """Test that copy() preserves array content, shape, nb_classes and input_was."""
    # float32 heatmap-style input
    left_stripe = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    right_stripe = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    arr = np.concatenate([left_stripe[..., np.newaxis], right_stripe[..., np.newaxis]], axis=2)
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2))
    segmap_copy = segmap.copy()
    assert np.allclose(segmap_copy.arr, segmap.arr)
    assert segmap_copy.shape == (2, 2)
    assert segmap_copy.nb_classes == segmap.nb_classes
    assert segmap_copy.input_was == segmap.input_was
    # int32 class-index input
    arr_int = np.int32([
        [0, 1],
        [2, 3]
    ])
    segmap = ia.SegmentationMapOnImage(arr_int, shape=(2, 2), nb_classes=10)
    segmap_copy = segmap.copy()
    assert np.array_equal(segmap_copy.get_arr_int(), arr_int)
    assert segmap_copy.shape == (2, 2)
    assert segmap_copy.nb_classes == 10
    assert segmap_copy.input_was == segmap.input_was
def test_SegmentationMapOnImage_deepcopy():
    """Test that deepcopy() duplicates the underlying array (no shared state)."""
    # float32 heatmap-style input
    col_a = np.float32([
        [1.0, 0.0],
        [1.0, 0.0]
    ])
    col_b = np.float32([
        [0.0, 1.0],
        [0.0, 1.0]
    ])
    arr = np.concatenate([col_a[..., np.newaxis], col_b[..., np.newaxis]], axis=2)
    segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2))
    segmap_dc = segmap.deepcopy()
    assert np.allclose(segmap_dc.arr, segmap.arr)
    assert segmap_dc.shape == (2, 2)
    assert segmap_dc.nb_classes == segmap.nb_classes
    assert segmap_dc.input_was == segmap.input_was
    # mutating the original must not leak into the deep copy
    segmap.arr[0, 0, 0] = 0.0
    assert not np.allclose(segmap_dc.arr, segmap.arr)
    # int32 class-index input
    arr_int = np.int32([
        [0, 1],
        [2, 3]
    ])
    segmap = ia.SegmentationMapOnImage(arr_int, shape=(2, 2), nb_classes=10)
    segmap_dc = segmap.deepcopy()
    assert np.array_equal(segmap_dc.get_arr_int(), segmap.get_arr_int())
    assert segmap_dc.shape == (2, 2)
    assert segmap_dc.nb_classes == 10
    assert segmap_dc.input_was == segmap.input_was
    # flipping the argmax of one cell must change only the original's int map
    segmap.arr[0, 0, 0] = 0.0
    segmap.arr[0, 0, 1] = 1.0
    assert not np.array_equal(segmap_dc.get_arr_int(), segmap.get_arr_int())
def test_Polygon___init__():
    """Test Polygon construction from keypoints, tuples and ndarrays.

    The exterior must always be stored as an (N, 2) float32 array,
    regardless of the input format, and the optional label must be kept.
    """
    # exterior is a list of Keypoint
    poly = ia.Polygon([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=0.5, y=2.5)])
    assert poly.exterior.dtype.type == np.float32
    assert np.allclose(
        poly.exterior,
        np.float32([
            [0.0, 0.0],
            [1.0, 1.0],
            [0.5, 2.5]
        ])
    )
    # exterior is list of tuple of floats
    poly = ia.Polygon([(0.0, 0.0), (1.0, 1.0), (0.5, 2.5)])
    assert poly.exterior.dtype.type == np.float32
    assert np.allclose(
        poly.exterior,
        np.float32([
            [0.0, 0.0],
            [1.0, 1.0],
            [0.5, 2.5]
        ])
    )
    # exterior is list of tuple of integer
    poly = ia.Polygon([(0, 0), (1, 1), (1, 3)])
    assert poly.exterior.dtype.type == np.float32
    assert np.allclose(
        poly.exterior,
        np.float32([
            [0.0, 0.0],
            [1.0, 1.0],
            [1.0, 3.0]
        ])
    )
    # exterior is (N,2) ndarray
    poly = ia.Polygon(
        np.float32([
            [0.0, 0.0],
            [1.0, 1.0],
            [0.5, 2.5]
        ])
    )
    assert poly.exterior.dtype.type == np.float32
    assert np.allclose(
        poly.exterior,
        np.float32([
            [0.0, 0.0],
            [1.0, 1.0],
            [0.5, 2.5]
        ])
    )
    # exterior is (N,2) ndarray in float64 -- must be downcast to float32
    poly = ia.Polygon(
        np.float64([
            [0.0, 0.0],
            [1.0, 1.0],
            [0.5, 2.5]
        ])
    )
    assert poly.exterior.dtype.type == np.float32
    assert np.allclose(
        poly.exterior,
        np.float32([
            [0.0, 0.0],
            [1.0, 1.0],
            [0.5, 2.5]
        ])
    )
    # arrays without points -- empty exterior must still be (0, 2) float32
    poly = ia.Polygon([])
    assert poly.exterior.dtype.type == np.float32
    assert poly.exterior.shape == (0, 2)
    poly = ia.Polygon(np.zeros((0, 2), dtype=np.float32))
    assert poly.exterior.dtype.type == np.float32
    assert poly.exterior.shape == (0, 2)
    # bad array shape: a flat (8,) array cannot be interpreted as (N, 2) points
    got_exception = False
    try:
        _ = ia.Polygon(np.zeros((8,), dtype=np.float32))
    except Exception:
        # was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt
        got_exception = True
    assert got_exception
    # label defaults to None and is stored when provided
    poly = ia.Polygon([(0, 0)])
    assert poly.label is None
    poly = ia.Polygon([(0, 0)], label="test")
    assert poly.label == "test"
def test_Polygon_xx():
    """Test the xx property, which exposes the x-coordinates as float32."""
    poly = ia.Polygon([(0, 0), (1, 0), (1.5, 0), (4.1, 1), (2.9, 2.0)])
    assert poly.xx.dtype.type == np.float32
    assert np.allclose(poly.xx, np.float32([0.0, 1.0, 1.5, 4.1, 2.9]))
    # polygon without any points yields an empty coordinate vector
    empty_poly = ia.Polygon([])
    assert empty_poly.xx.dtype.type == np.float32
    assert empty_poly.xx.shape == (0,)
def test_Polygon_yy():
    """Test the yy property, which exposes the y-coordinates as float32."""
    poly = ia.Polygon([(0, 0), (0, 1), (0, 1.5), (1, 4.1), (2.0, 2.9)])
    assert poly.yy.dtype.type == np.float32
    assert np.allclose(poly.yy, np.float32([0.0, 1.0, 1.5, 4.1, 2.9]))
    # polygon without any points yields an empty coordinate vector
    empty_poly = ia.Polygon([])
    assert empty_poly.yy.dtype.type == np.float32
    assert empty_poly.yy.shape == (0,)
def test_Polygon_xx_int():
    """Test the xx_int property: x-coordinates rounded to int32."""
    poly = ia.Polygon([(0, 0), (1, 0), (1.5, 0), (4.1, 1), (2.9, 2.0)])
    assert poly.xx_int.dtype.type == np.int32
    # 1.5 rounds up to 2, 4.1 down to 4, 2.9 up to 3
    assert np.allclose(poly.xx_int, np.int32([0, 1, 2, 4, 3]))
    # polygon without any points yields an empty coordinate vector
    empty_poly = ia.Polygon([])
    assert empty_poly.xx_int.dtype.type == np.int32
    assert empty_poly.xx_int.shape == (0,)
def test_Polygon_yy_int():
    """Test the yy_int property: y-coordinates rounded to int32."""
    poly = ia.Polygon([(0, 0), (0, 1), (0, 1.5), (1, 4.1), (2.0, 2.9)])
    assert poly.yy_int.dtype.type == np.int32
    # 1.5 rounds up to 2, 4.1 down to 4, 2.9 up to 3
    assert np.allclose(poly.yy_int, np.int32([0, 1, 2, 4, 3]))
    # polygon without any points yields an empty coordinate vector
    empty_poly = ia.Polygon([])
    assert empty_poly.yy_int.dtype.type == np.int32
    assert empty_poly.yy_int.shape == (0,)
def test_Polygon_is_valid():
    """Test validity: at least 3 points required, self-intersections invalid."""
    cases = [
        ([(0, 0), (1, 0), (1, 1), (0, 1)], True),             # simple square
        ([], False),                                           # no points
        ([(0, 0)], False),                                     # single point
        ([(0, 0), (1, 0)], False),                             # only a line
        ([(0, 0), (1, 0), (-1, 0.5), (1, 1), (0, 1)], False),  # self-intersecting
        ([(0, 0), (1, 0), (1, 0), (1, 1), (0, 1)], True),      # duplicate point is fine
    ]
    for exterior, expected in cases:
        poly = ia.Polygon(exterior)
        if expected:
            assert poly.is_valid
        else:
            assert not poly.is_valid
def test_Polygon_area():
    """Test area computation for rectangles and a triangle."""
    eps = 1e-8
    # unit square
    square = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    assert square.area == 1
    assert 1.0 - eps < square.area < 1.0 + eps
    # 2x1 rectangle
    rect = ia.Polygon([(0, 0), (2, 0), (2, 1), (0, 1)])
    assert rect.area == 2
    assert 2.0 - eps < rect.area < 2.0 + eps
    # right triangle covering half of the unit square
    triangle = ia.Polygon([(0, 0), (1, 1), (0, 1)])
    assert 1/2 - eps < triangle.area < 1/2 + eps
def test_Polygon_project():
    """Test projecting polygons between image shapes (coordinate rescaling)."""
    cases = [
        # (shape_from, shape_to, expected exterior)
        ((1, 1), (1, 1), [[0, 0], [1, 0], [1, 1], [0, 1]]),  # identity
        ((1, 1), (2, 2), [[0, 0], [2, 0], [2, 2], [0, 2]]),  # uniform 2x upscale
        ((1, 1), (2, 1), [[0, 0], [1, 0], [1, 2], [0, 2]]),  # y-axis only 2x
    ]
    for shape_from, shape_to, expected in cases:
        poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        projected = poly.project(shape_from, shape_to)
        assert projected.exterior.dtype.type == np.float32
        assert projected.exterior.shape == (4, 2)
        assert np.allclose(projected.exterior, np.float32(expected))
    # an empty polygon keeps its empty (0, 2) exterior
    projected = ia.Polygon([]).project((1, 1), (2, 2))
    assert projected.exterior.dtype.type == np.float32
    assert projected.exterior.shape == (0, 2)
def test_Polygon__compute_inside_image_point_mask():
    """Test the per-point inside-image mask for shape tuples and array inputs."""
    # all points strictly below 1.0 lie inside a 1x1 image
    poly = ia.Polygon([(0, 0), (0.999, 0), (0.999, 0.999), (0, 0.999)])
    mask = poly._compute_inside_image_point_mask((1, 1, 3))
    assert np.array_equal(mask, np.array([True, True, True, True], dtype=bool))
    # points at exactly 1.0 fall outside a 1x1 image; the image may be given
    # as an (H, W, C) tuple, an (H, W) tuple or an ndarray
    for image in [(1, 1, 3), (1, 1), np.zeros((1, 1, 3), dtype=np.uint8)]:
        poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        mask = poly._compute_inside_image_point_mask(image)
        assert np.array_equal(mask, np.array([True, False, False, False], dtype=bool))
def test_Polygon_is_fully_within_image():
    """Test detection of polygons located entirely inside the image plane."""
    shapes = [(1, 1, 3), (1, 1), np.zeros((1, 1, 3), dtype=np.uint8)]
    # all points strictly below 1.0 -> fully inside a 1x1 image
    for shape in shapes:
        poly = ia.Polygon([(0, 0), (0.999, 0), (0.999, 0.999), (0, 0.999)])
        assert poly.is_fully_within_image(shape)
    # points at exactly 1.0 fall outside a 1x1 image
    for shape in shapes:
        poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        assert not poly.is_fully_within_image(shape)
    # far outside the image
    poly = ia.Polygon([(100, 100), (101, 100), (101, 101), (100, 101)])
    assert not poly.is_fully_within_image((1, 1, 3))
def test_Polygon_is_partly_within_image():
    """Test detection of polygons that at least partially overlap the image."""
    shapes = [(1, 1, 3), (1, 1), np.zeros((1, 1, 3), dtype=np.uint8)]
    # fully inside implies partly within
    for shape in shapes:
        poly = ia.Polygon([(0, 0), (0.999, 0), (0.999, 0.999), (0, 0.999)])
        assert poly.is_partly_within_image(shape)
    # square touching the right/bottom border still overlaps the image
    for shape in shapes:
        poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
        assert poly.is_partly_within_image(shape)
    # far outside the image -> no overlap at all
    for shape in shapes:
        poly = ia.Polygon([(100, 100), (101, 100), (101, 101), (100, 101)])
        assert not poly.is_partly_within_image(shape)
def test_Polygon_is_out_of_image():
    """Test partly/fully out-of-image classification of polygons."""
    # polygon entirely inside: never out of image, regardless of flags
    for image in [(1, 1, 3), (1, 1), np.zeros((1, 1, 3), dtype=np.uint8)]:
        poly = ia.Polygon([(0, 0), (0.999, 0), (0.999, 0.999), (0, 0.999)])
        for partly in [False, True]:
            for fully in [False, True]:
                assert not poly.is_out_of_image(image, partly=partly, fully=fully)
    # polygon partially outside: reported only when partly=True
    poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
    image = np.zeros((1, 1, 3), dtype=np.uint8)
    assert not poly.is_out_of_image(image, partly=False, fully=False)
    assert poly.is_out_of_image(image, partly=True, fully=False)
    assert not poly.is_out_of_image(image, partly=False, fully=True)
    assert poly.is_out_of_image(image, partly=True, fully=True)
    # polygon fully outside: reported only when fully=True
    poly = ia.Polygon([(100, 100), (101, 100), (101, 101), (100, 101)])
    shape = (1, 1, 3)
    assert not poly.is_out_of_image(shape, partly=False, fully=False)
    assert not poly.is_out_of_image(shape, partly=True, fully=False)
    assert poly.is_out_of_image(shape, partly=False, fully=True)
    assert poly.is_out_of_image(shape, partly=True, fully=True)
def test_Polygon_cut_out_of_image():
    """Test cut_out_of_image() via the shared cut/clip test helper."""
    def _apply(poly, image):
        return poly.cut_out_of_image(image)
    _test_Polygon_cut_clip(_apply)
def test_Polygon_clip_out_of_image():
    """Test clip_out_of_image() via the shared cut/clip test helper."""
    def _apply(poly, image):
        return poly.clip_out_of_image(image)
    _test_Polygon_cut_clip(_apply)
def _test_Polygon_cut_clip(func):
    """Shared test body for Polygon.cut_out_of_image()/clip_out_of_image().

    Parameters
    ----------
    func : callable
        Callable ``func(polygon, image)`` performing the cut/clip operation
        and returning an ``ia.MultiPolygon``.
    """
    # poly inside image
    poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label=None)
    image = np.zeros((1, 1, 3), dtype=np.uint8)
    multipoly_clipped = func(poly, image)
    assert isinstance(multipoly_clipped, ia.MultiPolygon)
    assert len(multipoly_clipped.geoms) == 1
    assert multipoly_clipped.geoms[0].exterior_almost_equals(poly.exterior)
    assert multipoly_clipped.geoms[0].label is None
    # square poly shifted by x=0.5, y=0.5 => half out of image
    poly = ia.Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)], label="test")
    image = np.zeros((1, 1, 3), dtype=np.uint8)
    multipoly_clipped = func(poly, image)
    assert isinstance(multipoly_clipped, ia.MultiPolygon)
    assert len(multipoly_clipped.geoms) == 1
    assert multipoly_clipped.geoms[0].exterior_almost_equals(np.float32([
        [0.5, 0.5],
        [1.0, 0.5],
        [1.0, 1.0],
        [0.5, 1.0]
    ]))
    assert multipoly_clipped.geoms[0].label == "test"
    # non-square poly, with one rectangle on the left side of the image and one on the right side,
    # both sides are connected by a thin strip below the image
    # after clipping it should become two rectangles
    poly = ia.Polygon([(-0.1, 0.0), (0.4, 0.0), (0.4, 1.1), (0.6, 1.1), (0.6, 0.0), (1.1, 0.0),
                       (1.1, 1.2), (-0.1, 1.2)],
                      label="test")
    image = np.zeros((1, 1, 3), dtype=np.uint8)
    multipoly_clipped = func(poly, image)
    assert isinstance(multipoly_clipped, ia.MultiPolygon)
    assert len(multipoly_clipped.geoms) == 2
    assert multipoly_clipped.geoms[0].exterior_almost_equals(np.float32([
        [0.0, 0.0],
        [0.4, 0.0],
        [0.4, 1.0],
        [0.0, 1.0]
    ]))
    assert multipoly_clipped.geoms[0].label == "test"
    assert multipoly_clipped.geoms[1].exterior_almost_equals(np.float32([
        [0.6, 0.0],
        [1.0, 0.0],
        [1.0, 1.0],
        [0.6, 1.0]
    ]))
    # BUG FIX: previously re-asserted geoms[0].label here (copy-paste error),
    # leaving the second geometry's label unverified.
    assert multipoly_clipped.geoms[1].label == "test"
def test_Polygon_shift():
    """Test shifting polygons via top/right/bottom/left pixel offsets."""
    poly = ia.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)], label="test")
    # make sure that shift does not change poly inplace
    poly_shifted = poly.shift(top=1)
    assert np.allclose(poly.exterior, np.float32([
        [0, 0],
        [1, 0],
        [1, 1],
        [0, 1]
    ]))
    assert np.allclose(poly_shifted.exterior, np.float32([
        [0, 1],
        [1, 1],
        [1, 2],
        [0, 2]
    ]))
    for v in [1, 0, -1, 0.5]:
        # top/bottom
        # shift(top=v) pushes the polygon down: y increases by v
        poly_shifted = poly.shift(top=v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0, 0 + v],
            [1, 0 + v],
            [1, 1 + v],
            [0, 1 + v]
        ]))
        assert poly_shifted.label == "test"
        # shift(bottom=v) pushes the polygon up: y decreases by v
        poly_shifted = poly.shift(bottom=v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0, 0 - v],
            [1, 0 - v],
            [1, 1 - v],
            [0, 1 - v]
        ]))
        assert poly_shifted.label == "test"
        # opposite offsets add up: top=v plus bottom=-v gives 2*v downwards
        poly_shifted = poly.shift(top=v, bottom=-v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0, 0 + 2*v],
            [1, 0 + 2*v],
            [1, 1 + 2*v],
            [0, 1 + 2*v]
        ]))
        assert poly_shifted.label == "test"
        # left/right
        # shift(left=v) pushes the polygon right: x increases by v
        poly_shifted = poly.shift(left=v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0 + v, 0],
            [1 + v, 0],
            [1 + v, 1],
            [0 + v, 1]
        ]))
        assert poly_shifted.label == "test"
        # shift(right=v) pushes the polygon left: x decreases by v
        poly_shifted = poly.shift(right=v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0 - v, 0],
            [1 - v, 0],
            [1 - v, 1],
            [0 - v, 1]
        ]))
        assert poly_shifted.label == "test"
        # opposite offsets add up: left=v plus right=-v gives 2*v to the right
        poly_shifted = poly.shift(left=v, right=-v)
        assert np.allclose(poly_shifted.exterior, np.float32([
            [0 + 2 * v, 0],
            [1 + 2 * v, 0],
            [1 + 2 * v, 1],
            [0 + 2 * v, 1]
        ]))
        assert poly_shifted.label == "test"
def test_Polygon_draw_on_image():
    """Test drawing polygon faces and perimeters onto uint8 images."""
    # 10x10x3 gradient image; each channel holds the values 0..99
    image = np.tile(np.arange(100).reshape(10, 10, 1), (1, 1, 3)).astype(np.uint8)
    # simple drawing of square
    poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=1.0, alpha_perimeter=1.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.sum(image) == 3 * np.sum(np.arange(100))  # draw did not change original image (copy=True)
    for c_idx, value in enumerate([0, 255, 0]):
        assert np.all(image_poly[2:9, 2:3, c_idx] == np.zeros((7, 1), dtype=np.uint8) + value)  # left boundary
        assert np.all(image_poly[2:9, 8:9, c_idx] == np.zeros((7, 1), dtype=np.uint8) + value)  # right boundary
        assert np.all(image_poly[2:3, 2:9, c_idx] == np.zeros((1, 7), dtype=np.uint8) + value)  # top boundary
        assert np.all(image_poly[8:9, 2:9, c_idx] == np.zeros((1, 7), dtype=np.uint8) + value)  # bottom boundary
    # the interior (inside the perimeter) must be filled with the face color
    expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (5, 5, 1))
    assert np.all(image_poly[3:8, 3:8, :] == expected)
    # TODO test drawing on float32, float64 image
    # drawing of poly that is half out of image
    poly = ia.Polygon([(2, 2+5), (8, 2+5), (8, 8+5), (2, 8+5)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=1.0, alpha_perimeter=1.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.sum(image) == 3 * np.sum(np.arange(100))  # draw did not change original image (copy=True)
    for c_idx, value in enumerate([0, 255, 0]):
        assert np.all(image_poly[2+5:, 2:3, c_idx] == np.zeros((3, 1), dtype=np.uint8) + value)  # left boundary
        assert np.all(image_poly[2+5:, 8:9, c_idx] == np.zeros((3, 1), dtype=np.uint8) + value)  # right boundary
        assert np.all(image_poly[2+5:3+5, 2:9, c_idx] == np.zeros((1, 7), dtype=np.uint8) + value)  # top boundary
    # only the visible part of the face (2 rows) is drawn
    expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (2, 5, 1))
    assert np.all(image_poly[3+5:, 3:8, :] == expected)
    # drawing of poly that is half out of image, with raise_if_out_of_image=True
    poly = ia.Polygon([(2, 2+5), (8, 2+5), (8, 8+5), (0, 8+5)])
    got_exception = False
    try:
        _ = poly.draw_on_image(image,
                               color=[32, 128, 32], color_perimeter=[0, 255, 0],
                               alpha=1.0, alpha_perimeter=1.0,
                               raise_if_out_of_image=True)
    except Exception as exc:
        assert "Cannot draw polygon" in str(exc)
        got_exception = True
    assert not got_exception  # only polygons fully outside of the image plane lead to exceptions
    # drawing of poly that is fully out of image
    poly = ia.Polygon([(100, 100), (100+10, 100), (100+10, 100+10), (100, 100+10)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=1.0, alpha_perimeter=1.0,
                                    raise_if_out_of_image=False)
    # nothing visible to draw, so the output equals the input
    assert np.array_equal(image_poly, image)
    # drawing of poly that is fully out of image, with raise_if_out_of_image=True
    poly = ia.Polygon([(100, 100), (100+10, 100), (100+10, 100+10), (100, 100+10)])
    got_exception = False
    try:
        _ = poly.draw_on_image(image,
                               color=[32, 128, 32], color_perimeter=[0, 255, 0],
                               alpha=1.0, alpha_perimeter=1.0,
                               raise_if_out_of_image=True)
    except Exception as exc:
        assert "Cannot draw polygon" in str(exc)
        got_exception = True
    assert got_exception
    # face invisible via alpha
    poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=0.0, alpha_perimeter=1.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.sum(image) == 3 * np.sum(np.arange(100))  # draw did not change original image (copy=True)
    for c_idx, value in enumerate([0, 255, 0]):
        assert np.all(image_poly[2:9, 2:3, c_idx] == np.zeros((7, 1), dtype=np.uint8) + value)  # left boundary
    # interior untouched because the face alpha is zero
    assert np.all(image_poly[3:8, 3:8, :] == image[3:8, 3:8, :])
    # boundary invisible via alpha
    poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
    image_poly = poly.draw_on_image(image,
                                    color=[32, 128, 32], color_perimeter=[0, 255, 0],
                                    alpha=1.0, alpha_perimeter=0.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.sum(image) == 3 * np.sum(np.arange(100))  # draw did not change original image (copy=True)
    # with the perimeter hidden, the face color fills the whole 6x6 region
    expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (6, 6, 1))
    assert np.all(image_poly[2:8, 2:8, :] == expected)
    # copy=False
    # test deactivated as the function currently does not offer a copy argument
    """
    image_cp = np.copy(image)
    poly = ia.Polygon([(2, 2), (8, 2), (8, 8), (2, 8)])
    image_poly = poly.draw_on_image(image_cp,
                                    color_face=[32, 128, 32], color_boundary=[0, 255, 0],
                                    alpha_face=1.0, alpha_boundary=1.0,
                                    raise_if_out_of_image=False)
    assert image_poly.dtype.type == np.uint8
    assert image_poly.shape == (10, 10, 3)
    assert np.all(image_cp == image_poly)
    assert not np.all(image_cp == image)
    for c_idx, value in enumerate([0, 255, 0]):
        assert np.all(image_poly[2:9, 2:3, c_idx] == np.zeros((6, 1, 3), dtype=np.uint8) + value)  # left boundary
        assert np.all(image_cp[2:9, 2:3, c_idx] == np.zeros((6, 1, 3), dtype=np.uint8) + value)  # left boundary
    expected = np.tile(np.uint8([32, 128, 32]).reshape((1, 1, 3)), (5, 5, 1))
    assert np.all(image_poly[3:8, 3:8, :] == expected)
    assert np.all(image_cp[3:8, 3:8, :] == expected)
    """
def test_Polygon_extract_from_image():
    """Test extracting the polygon-covered region of an image as a subimage."""
    # 20x20x2 image with unique values per cell so slices are distinguishable
    image = np.arange(20*20*2).reshape(20, 20, 2).astype(np.int32)
    # inside image and completely covers it
    poly = ia.Polygon([(0, 0), (10, 0), (10, 10), (0, 10)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[0:10, 0:10, :])
    # inside image, subpart of it (not all may be extracted)
    poly = ia.Polygon([(1, 1), (9, 1), (9, 9), (1, 9)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[1:9, 1:9, :])
    # inside image, two image areas that don't belong to the polygon but have to be extracted
    poly = ia.Polygon([(0, 0), (10, 0), (10, 5), (20, 5),
                       (20, 20), (10, 20), (10, 5), (0, 5)])
    subimage = poly.extract_from_image(image)
    expected = np.copy(image)
    expected[:5, 10:, :] = 0  # top right block
    expected[5:, :10, :] = 0  # left bottom block
    assert np.array_equal(subimage, expected)
    # partially out of image
    # the out-of-image half of the extraction window is zero-padded
    poly = ia.Polygon([(-5, 0), (5, 0), (5, 10), (-5, 10)])
    subimage = poly.extract_from_image(image)
    expected = np.zeros((10, 10, 2), dtype=np.int32)
    expected[0:10, 5:10, :] = image[0:10, 0:5, :]
    assert np.array_equal(subimage, expected)
    # fully out of image
    # nothing overlaps, so the result is an all-zero array of the polygon's size
    poly = ia.Polygon([(30, 0), (40, 0), (40, 10), (30, 10)])
    subimage = poly.extract_from_image(image)
    expected = np.zeros((10, 10, 2), dtype=np.int32)
    assert np.array_equal(subimage, expected)
    # inside image, subpart of it
    # float coordinates, rounded so that the whole image will be extracted
    poly = ia.Polygon([(0.4, 0.4), (9.6, 0.4), (9.6, 9.6), (0.4, 9.6)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[0:10, 0:10, :])
    # inside image, subpart of it
    # float coordinates, rounded so that x/y 0<=i<9 will be extracted (instead of 0<=i<10)
    poly = ia.Polygon([(0.5, 0.5), (9.4, 0.5), (9.4, 9.4), (0.5, 9.4)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[0:9, 0:9, :])
    # inside image, subpart of it
    # float coordinates, rounded so that x/y 1<=i<9 will be extracted (instead of 0<=i<10)
    poly = ia.Polygon([(0.51, 0.51), (9.4, 0.51), (9.4, 9.4), (0.51, 9.4)])
    subimage = poly.extract_from_image(image)
    assert np.array_equal(subimage, image[1:9, 1:9, :])
def test_Polygon_change_first_point_by_coords():
    """Test rotating the exterior so the point closest to (x, y) comes first."""
    triangle = [(0, 0), (1, 0), (1, 1)]
    # requesting the current first point keeps the order
    poly = ia.Polygon(triangle)
    reordered = poly.change_first_point_by_coords(x=0, y=0)
    assert np.allclose(poly.exterior, reordered.exterior)
    # rotate so that (1, 0) comes first; the original must stay untouched
    poly = ia.Polygon(triangle)
    reordered = poly.change_first_point_by_coords(x=1, y=0)
    assert np.allclose(poly.exterior, np.float32([[0, 0], [1, 0], [1, 1]]))
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))
    # rotate so that (1, 1) comes first
    poly = ia.Polygon(triangle)
    reordered = poly.change_first_point_by_coords(x=1, y=1)
    assert np.allclose(reordered.exterior, np.float32([[1, 1], [0, 0], [1, 0]]))
    # inaccurate target point, but within max_distance
    poly = ia.Polygon(triangle)
    reordered = poly.change_first_point_by_coords(x=1.0, y=0.01, max_distance=0.1)
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))
    # inaccurate target point with unlimited max distance
    poly = ia.Polygon(triangle)
    reordered = poly.change_first_point_by_coords(x=1.0, y=0.01, max_distance=None)
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))
    # target point farther away than max_distance must raise
    poly = ia.Polygon(triangle)
    got_exception = False
    try:
        _ = poly.change_first_point_by_coords(x=1.0, y=0.01, max_distance=0.001)
    except Exception as exc:
        assert "Closest found point " in str(exc)
        got_exception = True
    assert got_exception
    # reorder with only two points
    poly = ia.Polygon([(0, 0), (1, 0)])
    reordered = poly.change_first_point_by_coords(x=1, y=0)
    assert np.allclose(reordered.exterior, np.float32([[1, 0], [0, 0]]))
    # reorder with a single point
    poly = ia.Polygon([(0, 0)])
    reordered = poly.change_first_point_by_coords(x=0, y=0)
    assert np.allclose(reordered.exterior, np.float32([[0, 0]]))
def test_Polygon_change_first_point_by_index():
poly = ia.Polygon([(0, 0), (1, 0), (1, 1)])
poly_reordered = poly.change_first_point_by_index(0)
assert np.allclose(poly.exterior, poly_reordered.exterior)
poly = ia.Polygon([(0, 0), (1, 0), (1, 1)])
poly_reordered = poly.change_first_point_by_index(1)
# make sure that it does not reorder inplace
assert np.allclose(poly.exterior, np.float32([[0, 0], [1, 0], [1, 1]]))
assert np.allclose(poly_reordered.exterior, np.float32([[1, 0], [1, 1], [0, 0]]))
poly = ia.Polygon([(0, 0), (1, 0), (1, 1)])
poly_reordered = poly.change_first_point_by_index(2)
assert np.allclose(poly_reordered.exterior, np.float32([[1, 1], [0, 0], [1, 0]]))
# reorder with two points
poly = ia.Polygon([(0, 0), (1, 0)])
poly_reordered = poly.change_first_point_by_index(1)
assert np.allclose(poly_reordered.exterior, np.float32([[1, 0], [0, 0]]))
# reorder with one point
poly = ia.Polygon([(0, 0)])
poly_reordered = poly.change_first_point_by_index(0)
assert np.allclose(poly_reordered.exterior, | np.float32([[0, 0]]) | numpy.float32 |
import copy
import numpy as np
from Classes.Uncertainty import Uncertainty
from Classes.QComp import QComp
from Classes.MovingBedTests import MovingBedTests
from Classes.TransectData import TransectData
class QAData(object):
"""Evaluates and stores quality assurance characteristics and messages.
Attributes
----------
q_run_threshold_caution: int
Caution threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_run_threshold_warning: int
Warning threshold for interpolated discharge for a run of invalid ensembles, in percent.
q_total_threshold_caution: int
Caution threshold for total interpolated discharge for invalid ensembles, in percent.
q_total_threshold_warning: int
Warning threshold for total interpolated discharge for invalid ensembles, in percent.
transects: dict
Dictionary of quality assurance checks for transects
system_tst: dict
Dictionary of quality assurance checks on the system test(s)
compass: dict
Dictionary of quality assurance checks on compass calibration and evaluations
temperature: dict
        Dictionary of quality assurance checks on temperature comparisons and variation
movingbed: dict
Dictionary of quality assurance checks on moving-bed tests
user: dict
Dictionary of quality assurance checks on user input data
boat: dict
Dictionary of quality assurance checks on boat velocities
bt_vel: dict
Dictionary of quality assurance checks on bottom track velocities
gga_vel: dict
Dictionary of quality assurance checks on gga boat velocities
vtg_vel: dict
Dictionary of quality assurance checks on vtg boat velocities
w_vel: dict
Dictionary of quality assurance checks on water track velocities
extrapolation: dict
Dictionary of quality assurance checks on extrapolations
edges: dict
Dictionary of quality assurance checks on edges
"""
    def __init__(self, meas, mat_struct=None, compute=True):
        """Checks the measurement for all quality assurance issues.

        Parameters
        ----------
        meas: Measurement
            Object of class Measurement
        mat_struct: mat_struct, optional
            Matlab data structure from a previously saved QRev file; used
            to populate the object when compute is False.
        compute: bool, optional
            When True (default), run all QA checks on meas; when False,
            load previously computed results from mat_struct instead.
        """
        # Set default thresholds
        # Interpolated-discharge thresholds, in percent of total discharge
        self.q_run_threshold_caution = 3
        self.q_run_threshold_warning = 5
        self.q_total_threshold_caution = 10
        self.q_total_threshold_warning = 25
        # Initialize instance variables
        # One dict per QA category; each is filled by the corresponding *_qa method
        self.transects = dict()
        self.system_tst = dict()
        self.compass = dict()
        self.temperature = dict()
        self.movingbed = dict()
        self.user = dict()
        self.depths = dict()
        self.boat = dict()
        self.bt_vel = dict()
        self.gga_vel = dict()
        self.vtg_vel = dict()
        self.w_vel = dict()
        self.extrapolation = dict()
        self.edges = dict()
        # Tracks whether each settings tab still uses default settings
        self.settings_dict = dict()
        self.settings_dict['tab_compass'] = 'Default'
        self.settings_dict['tab_tempsal'] = 'Default'
        self.settings_dict['tab_mbt'] = 'Default'
        self.settings_dict['tab_bt'] = 'Default'
        self.settings_dict['tab_gps'] = 'Default'
        self.settings_dict['tab_depth'] = 'Default'
        self.settings_dict['tab_wt'] = 'Default'
        self.settings_dict['tab_extrap'] = 'Default'
        self.settings_dict['tab_edges'] = 'Default'
        if compute:
            # Apply QA checks
            self.transects_qa(meas)
            self.system_tst_qa(meas)
            self.compass_qa(meas)
            self.temperature_qa(meas)
            self.moving_bed_qa(meas)
            self.user_qa(meas)
            self.depths_qa(meas)
            self.boat_qa(meas)
            self.water_qa(meas)
            self.extrapolation_qa(meas)
            self.edges_qa(meas)
            # Check whether each settings tab deviates from defaults
            self.check_bt_setting(meas)
            self.check_wt_settings(meas)
            self.check_depth_settings(meas)
            self.check_gps_settings(meas)
            self.check_edge_settings(meas)
            self.check_extrap_settings(meas)
            self.check_tempsal_settings(meas)
            self.check_mbt_settings(meas)
            self.check_compass_settings(meas)
        else:
            # Restore previously computed QA results from a saved QRev file
            self.populate_from_qrev_mat(meas, mat_struct)
def populate_from_qrev_mat(self, meas, meas_struct):
    """Populates the object using data from previously saved QRev Matlab file.

    Parameters
    ----------
    meas: Measurement
        Object of Measurement
    meas_struct: mat_struct
        Matlab data structure obtained from sio.loadmat

    Notes
    -----
    QA checks that did not exist in older QRev files are supplemented with
    results computed fresh from the measurement data (new_qa below).
    """
    # Generate a new QA object using the measurement data and the current QA code.
    # When QA checks from the current QA are not available from old QRev files, these
    # checks will be included to supplement the old QRev file data.
    new_qa = QAData(meas)
    if hasattr(meas_struct, 'qa'):
        # Set default thresholds
        self.q_run_threshold_caution = meas_struct.qa.qRunThresholdCaution
        self.q_run_threshold_warning = meas_struct.qa.qRunThresholdWarning
        self.q_total_threshold_caution = meas_struct.qa.qTotalThresholdCaution
        self.q_total_threshold_warning = meas_struct.qa.qTotalThresholdWarning
        # Initialize instance variables
        self.transects = dict()
        self.transects['duration'] = meas_struct.qa.transects.duration
        self.transects['messages'] = self.make_list(meas_struct.qa.transects.messages)
        self.transects['number'] = meas_struct.qa.transects.number
        self.transects['recip'] = meas_struct.qa.transects.recip
        self.transects['sign'] = meas_struct.qa.transects.sign
        self.transects['status'] = meas_struct.qa.transects.status
        self.transects['uncertainty'] = meas_struct.qa.transects.uncertainty
        self.system_tst = dict()
        self.system_tst['messages'] = self.make_list(meas_struct.qa.systemTest.messages)
        self.system_tst['status'] = meas_struct.qa.systemTest.status
        self.compass = dict()
        self.compass['messages'] = self.make_list(meas_struct.qa.compass.messages)
        self.compass['status'] = meas_struct.qa.compass.status
        self.compass['status1'] = meas_struct.qa.compass.status1
        self.compass['status2'] = meas_struct.qa.compass.status2
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.compass, 'magvar'):
            self.compass['magvar'] = meas_struct.qa.compass.magvar
        else:
            self.compass['magvar'] = new_qa.compass['magvar']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.compass, 'magvarIdx'):
            self.compass['magvar_idx'] = self.make_array(meas_struct.qa.compass.magvarIdx)
        else:
            self.compass['magvar_idx'] = new_qa.compass['magvar_idx']
        # Changed mag_error_idx from bool to int array in QRevPy,
        # so always use the freshly computed check
        self.compass['mag_error_idx'] = new_qa.compass['mag_error_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.compass, 'pitchMeanWarningIdx'):
            self.compass['pitch_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanWarningIdx)
        else:
            self.compass['pitch_mean_warning_idx'] = new_qa.compass['pitch_mean_warning_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.compass, 'rollMeanWarningIdx'):
            self.compass['roll_mean_warning_idx'] = self.make_array(meas_struct.qa.compass.rollMeanWarningIdx)
        else:
            self.compass['roll_mean_warning_idx'] = new_qa.compass['roll_mean_warning_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.compass, 'pitchMeanCautionIdx'):
            self.compass['pitch_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchMeanCautionIdx)
        else:
            self.compass['pitch_mean_caution_idx'] = new_qa.compass['pitch_mean_caution_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.compass, 'rollMeanCautionIdx'):
            self.compass['roll_mean_caution_idx'] = self.make_array(meas_struct.qa.compass.rollMeanCautionIdx)
        else:
            self.compass['roll_mean_caution_idx'] = new_qa.compass['roll_mean_caution_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.compass, 'pitchStdCautionIdx'):
            self.compass['pitch_std_caution_idx'] = self.make_array(meas_struct.qa.compass.pitchStdCautionIdx)
        else:
            self.compass['pitch_std_caution_idx'] = new_qa.compass['pitch_std_caution_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.compass, 'rollStdCautionIdx'):
            self.compass['roll_std_caution_idx'] = self.make_array(meas_struct.qa.compass.rollStdCautionIdx)
        else:
            self.compass['roll_std_caution_idx'] = new_qa.compass['roll_std_caution_idx']
        self.temperature = dict()
        self.temperature['messages'] = self.make_list(meas_struct.qa.temperature.messages)
        self.temperature['status'] = meas_struct.qa.temperature.status
        self.movingbed = dict()
        self.movingbed['messages'] = self.make_list(meas_struct.qa.movingbed.messages)
        self.movingbed['status'] = meas_struct.qa.movingbed.status
        self.movingbed['code'] = meas_struct.qa.movingbed.code
        self.user = dict()
        self.user['messages'] = self.make_list(meas_struct.qa.user.messages)
        self.user['sta_name'] = bool(meas_struct.qa.user.staName)
        self.user['sta_number'] = bool(meas_struct.qa.user.staNumber)
        self.user['status'] = meas_struct.qa.user.status
        # If QA check not available, get check from new QA
        self.depths = self.create_qa_dict(self, meas_struct.qa.depths)
        if 'draft' not in self.depths:
            self.depths['draft'] = new_qa.depths['draft']
        if 'all_invalid' not in self.depths:
            self.depths['all_invalid'] = new_qa.depths['all_invalid']
        # If QA check not available, get check from new QA
        self.bt_vel = self.create_qa_dict(self, meas_struct.qa.btVel, ndim=2)
        if 'all_invalid' not in self.bt_vel:
            self.bt_vel['all_invalid'] = new_qa.bt_vel['all_invalid']
        # If QA check not available, get check from new QA
        self.gga_vel = self.create_qa_dict(self, meas_struct.qa.ggaVel, ndim=2)
        if 'all_invalid' not in self.gga_vel:
            self.gga_vel['all_invalid'] = new_qa.gga_vel['all_invalid']
        # If QA check not available, get check from new QA
        self.vtg_vel = self.create_qa_dict(self, meas_struct.qa.vtgVel, ndim=2)
        if 'all_invalid' not in self.vtg_vel:
            self.vtg_vel['all_invalid'] = new_qa.vtg_vel['all_invalid']
        # If QA check not available, get check from new QA
        self.w_vel = self.create_qa_dict(self, meas_struct.qa.wVel, ndim=2)
        if 'all_invalid' not in self.w_vel:
            self.w_vel['all_invalid'] = new_qa.w_vel['all_invalid']
        self.extrapolation = dict()
        self.extrapolation['messages'] = self.make_list(meas_struct.qa.extrapolation.messages)
        self.extrapolation['status'] = meas_struct.qa.extrapolation.status
        self.edges = dict()
        self.edges['messages'] = self.make_list(meas_struct.qa.edges.messages)
        self.edges['status'] = meas_struct.qa.edges.status
        self.edges['left_q'] = meas_struct.qa.edges.leftQ
        self.edges['right_q'] = meas_struct.qa.edges.rightQ
        self.edges['left_sign'] = meas_struct.qa.edges.leftSign
        self.edges['right_sign'] = meas_struct.qa.edges.rightSign
        self.edges['left_zero'] = meas_struct.qa.edges.leftzero
        self.edges['right_zero'] = meas_struct.qa.edges.rightzero
        self.edges['left_type'] = meas_struct.qa.edges.leftType
        self.edges['right_type'] = meas_struct.qa.edges.rightType
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.edges, 'rightDistMovedIdx'):
            self.edges['right_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.rightDistMovedIdx)
        else:
            self.edges['right_dist_moved_idx'] = new_qa.edges['right_dist_moved_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.edges, 'leftDistMovedIdx'):
            self.edges['left_dist_moved_idx'] = self.make_array(meas_struct.qa.edges.leftDistMovedIdx)
        else:
            self.edges['left_dist_moved_idx'] = new_qa.edges['left_dist_moved_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.edges, 'leftQIdx'):
            self.edges['left_q_idx'] = self.make_array(meas_struct.qa.edges.leftQIdx)
        else:
            self.edges['left_q_idx'] = new_qa.edges['left_q_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.edges, 'rightQIdx'):
            self.edges['right_q_idx'] = self.make_array(meas_struct.qa.edges.rightQIdx)
        else:
            self.edges['right_q_idx'] = new_qa.edges['right_q_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.edges, 'leftZeroIdx'):
            self.edges['left_zero_idx'] = self.make_array(meas_struct.qa.edges.leftZeroIdx)
        else:
            self.edges['left_zero_idx'] = new_qa.edges['left_zero_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.edges, 'rightZeroIdx'):
            self.edges['right_zero_idx'] = self.make_array(meas_struct.qa.edges.rightZeroIdx)
        else:
            self.edges['right_zero_idx'] = new_qa.edges['right_zero_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.edges, 'invalid_transect_left_idx'):
            self.edges['invalid_transect_left_idx'] = \
                self.make_array(meas_struct.qa.edges.invalid_transect_left_idx)
        elif hasattr(meas_struct.qa.edges, 'invalidTransLeftIdx'):
            self.edges['invalid_transect_left_idx'] = \
                self.make_array(meas_struct.qa.edges.invalidTransLeftIdx)
        else:
            self.edges['invalid_transect_left_idx'] = new_qa.edges['invalid_transect_left_idx']
        # If QA check not available, get check from new QA
        if hasattr(meas_struct.qa.edges, 'invalid_transect_right_idx'):
            self.edges['invalid_transect_right_idx'] = \
                self.make_array(meas_struct.qa.edges.invalid_transect_right_idx)
        elif hasattr(meas_struct.qa.edges, 'invalidTransRightIdx'):
            # Bug fix: this hasattr previously checked meas_struct.qa instead of
            # meas_struct.qa.edges (the object actually read below), so the legacy
            # attribute was never restored; mirrors the left-edge branch above.
            self.edges['invalid_transect_right_idx'] = \
                self.make_array(meas_struct.qa.edges.invalidTransRightIdx)
        else:
            self.edges['invalid_transect_right_idx'] = new_qa.edges['invalid_transect_right_idx']
        if hasattr(meas_struct.qa, 'settings_dict'):
            self.settings_dict = dict()
            self.settings_dict['tab_compass'] = meas_struct.qa.settings_dict.tab_compass
            self.settings_dict['tab_tempsal'] = meas_struct.qa.settings_dict.tab_tempsal
            self.settings_dict['tab_mbt'] = meas_struct.qa.settings_dict.tab_mbt
            self.settings_dict['tab_bt'] = meas_struct.qa.settings_dict.tab_bt
            self.settings_dict['tab_gps'] = meas_struct.qa.settings_dict.tab_gps
            self.settings_dict['tab_depth'] = meas_struct.qa.settings_dict.tab_depth
            self.settings_dict['tab_wt'] = meas_struct.qa.settings_dict.tab_wt
            self.settings_dict['tab_extrap'] = meas_struct.qa.settings_dict.tab_extrap
            self.settings_dict['tab_edges'] = meas_struct.qa.settings_dict.tab_edges
@staticmethod
def create_qa_dict(self, mat_data, ndim=1):
    """Creates the dictionary used to store QA checks associated with the percent of discharge estimated
    by interpolation. This dictionary is used by BT, GPS, Depth, and WT.

    Parameters
    ----------
    self: QAData
        Object of QAData
    mat_data: mat_struct
        Matlab data from QRev file
    ndim: int
        Number of dimensions expected for the stored check arrays (2 for
        the velocity checks, 1 otherwise).

    Returns
    -------
    qa_dict: dict
        Dictionary of QA check results restored from the Matlab data.
    """
    # NOTE(review): declared @staticmethod but still takes the instance as an
    # explicit first argument; callers invoke it as self.create_qa_dict(self, ...).
    # Initialize dictionary
    qa_dict = dict()
    # Populate dictionary from Matlab data
    qa_dict['messages'] = QAData.make_list(mat_data.messages)
    # allInvalid not available in older QRev data
    if hasattr(mat_data, 'allInvalid'):
        qa_dict['all_invalid'] = self.make_array(mat_data.allInvalid, 1).astype(bool)
    qa_dict['q_max_run_caution'] = self.make_array(mat_data.qRunCaution, ndim).astype(bool)
    qa_dict['q_max_run_warning'] = self.make_array(mat_data.qRunWarning, ndim).astype(bool)
    qa_dict['q_total_caution'] = self.make_array(mat_data.qTotalCaution, ndim).astype(bool)
    qa_dict['q_total_warning'] = self.make_array(mat_data.qTotalWarning, ndim).astype(bool)
    qa_dict['status'] = mat_data.status
    # q_max_run and q_total not available in older QRev data
    try:
        qa_dict['q_max_run'] = self.make_array(mat_data.qMaxRun, ndim)
        qa_dict['q_total'] = self.make_array(mat_data.qTotal, ndim)
    except AttributeError:
        # Older files: fill with NaN placeholders (one row per entry, 6 columns)
        qa_dict['q_max_run'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6))
        qa_dict['q_total'] = np.tile(np.nan, (len(mat_data.qRunCaution), 6))
    return qa_dict
@staticmethod
def make_array(num_in, ndim=1):
"""Ensures that num_in is an array and if not makes it an array.
num_in: any
Any value or array
"""
if type(num_in) is np.ndarray:
if len(num_in.shape) < 2 and ndim > 1:
num_in = np.reshape(num_in, (1, num_in.shape[0]))
return num_in
else:
return num_in
else:
return np.array([num_in])
@staticmethod
def make_list(array_in):
"""Converts a string or array to a list.
Parameters
----------
array_in: any
Data to be converted to list.
Returns
-------
list_out: list
List of array_in data
"""
list_out = []
# Convert string to list
if type(array_in) is str:
list_out = [array_in]
else:
# Empty array
if array_in.size == 0:
list_out = []
# Single message with integer codes at end
elif array_in.size == 3:
if type(array_in[1]) is int or len(array_in[1].strip()) == 1:
temp = array_in.tolist()
if len(temp) > 0:
internal_list = []
for item in temp:
internal_list.append(item)
list_out = [internal_list]
else:
list_out = array_in.tolist()
# Either multiple messages with or without integer codes
else:
list_out = array_in.tolist()
return list_out
def transects_qa(self, meas):
    """Apply quality checks to transects

    Parameters
    ----------
    meas: Measurement
        Object of class Measurement

    Notes
    -----
    Results are written into self.transects. Each entry appended to
    self.transects['messages'] is a 3-element list; from the code below the
    first element is the message text, the second is 1 for warnings and
    2 for cautions, and the third is 0 for this category.
    """
    # Assume good results
    self.transects['status'] = 'good'
    # Initialize keys
    self.transects['messages'] = []
    self.transects['recip'] = 0
    self.transects['sign'] = 0
    self.transects['duration'] = 0
    self.transects['number'] = 0
    self.transects['uncertainty'] = 0
    # Initialize lists
    checked = []
    discharges = []
    start_edge = []
    # Populate lists (discharge and start edge only for checked transects)
    for n in range(len(meas.transects)):
        checked.append(meas.transects[n].checked)
        if meas.transects[n].checked:
            discharges.append(meas.discharge[n])
            start_edge.append(meas.transects[n].start_edge)
    num_checked = np.nansum(np.asarray(checked))
    # Check duration
    total_duration = 0
    if num_checked >= 1:
        for transect in meas.transects:
            if transect.checked:
                total_duration += transect.date_time.transect_duration_sec
    # Check duration against USGS policy
    if total_duration < 720:
        self.transects['status'] = 'caution'
        self.transects['messages'].append(
            ['Transects: Duration of selected transects is less than 720 seconds;', 2, 0])
        self.transects['duration'] = 1
    # Check transects for missing ensembles
    for transect in meas.transects:
        if transect.checked:
            # Determine number of missing ensembles
            if transect.adcp.manufacturer == 'SonTek':
                # Determine number of missing ensembles for SonTek data.
                # Gaps are ensembles whose duration exceeds 1.5 s; the gap time
                # divided by the average normal-ensemble duration estimates how
                # many ensembles fell in each gap.
                idx_missing = np.where(transect.date_time.ens_duration_sec > 1.5)[0]
                if len(idx_missing) > 0:
                    average_ensemble_duration = (np.nansum(transect.date_time.ens_duration_sec)
                                                 - np.nansum(transect.date_time.ens_duration_sec[idx_missing])) \
                                                / (len(transect.date_time.ens_duration_sec) - len(idx_missing))
                    num_missing = np.round(np.nansum(transect.date_time.ens_duration_sec[idx_missing])
                                           / average_ensemble_duration) - len(idx_missing)
                else:
                    num_missing = 0
            else:
                # Determine number of lost ensembles for TRDI data
                # (NaN durations mark lost ensembles; the first NaN is expected,
                # hence the -1)
                idx_missing = np.where(np.isnan(transect.date_time.ens_duration_sec) == True)[0]
                num_missing = len(idx_missing) - 1
            # Save caution message
            if num_missing > 0:
                self.transects['messages'].append(['Transects: ' + str(transect.file_name) + ' is missing '
                                                   + str(int(num_missing)) + ' ensembles;', 2, 0])
                self.transects['status'] = 'caution'
    # Check number of transects checked
    if num_checked == 0:
        # No transects selected
        self.transects['status'] = 'warning'
        self.transects['messages'].append(['TRANSECTS: No transects selected;', 1, 0])
        self.transects['number'] = 2
    elif num_checked == 1:
        # Only one transect selected
        self.transects['status'] = 'caution'
        self.transects['messages'].append(['Transects: Only one transect selected;', 2, 0])
        self.transects['number'] = 2
    else:
        self.transects['number'] = num_checked
        if num_checked == 2:
            # Only 2 transects selected: estimate the random uncertainty
            cov, _ = Uncertainty.uncertainty_q_random(discharges, 'total')
            # Check uncertainty
            if cov > 2:
                self.transects['status'] = 'caution'
                self.transects['messages'].append(
                    ['Transects: Uncertainty would be reduced by additional transects;', 2, 0])
    # Check for consistent sign of total discharge across checked transects
    q_positive = []
    for q in discharges:
        if q.total >= 0:
            q_positive.append(True)
        else:
            q_positive.append(False)
    if len(np.unique(q_positive)) > 1:
        self.transects['status'] = 'warning'
        self.transects['messages'].append(
            ['TRANSECTS: Sign of total Q is not consistent. One or more start banks may be incorrect;', 1, 0])
    # Check for reciprocal transects (equal counts of Left and Right start edges)
    num_left = start_edge.count('Left')
    num_right = start_edge.count('Right')
    if not num_left == num_right:
        self.transects['status'] = 'warning'
        self.transects['messages'].append(['TRANSECTS: Transects selected are not reciprocal transects;', 1, 0])
    # Check for zero discharge transects
    q_zero = False
    for q in discharges:
        if q.total == 0:
            q_zero = True
    if q_zero:
        self.transects['status'] = 'warning'
        self.transects['messages'].append(['TRANSECTS: One or more transects have zero Q;', 1, 0])
def system_tst_qa(self, meas):
    """Apply QA checks to system test.

    Parameters
    ----------
    meas: Measurement
        Object of class Measurement

    Notes
    -----
    Results are written into self.system_tst ('status' and 'messages').
    """
    self.system_tst['messages'] = []
    self.system_tst['status'] = 'good'
    # Determine if a system test was recorded
    if not meas.system_tst:
        # No system test data recorded
        self.system_tst['status'] = 'warning'
        self.system_tst['messages'].append(['SYSTEM TEST: No system test;', 1, 3])
    else:
        pt3_fail = False
        num_tests_with_failure = 0
        for test in meas.system_tst:
            if hasattr(test, 'result'):
                # Check for presence of pt3 test
                if 'pt3' in test.result and test.result['pt3'] is not None:
                    # Check hard_limit, high gain, wide bandwidth
                    if 'hard_limit' in test.result['pt3']:
                        if 'high_wide' in test.result['pt3']['hard_limit']:
                            corr_table = test.result['pt3']['hard_limit']['high_wide']['corr_table']
                            if len(corr_table) > 0:
                                # All lags past lag 2 should be less than 50% of lag 0
                                qa_threshold = corr_table[0, :] * 0.5
                                all_lag_check = np.greater(corr_table[3::, :], qa_threshold)
                                # Lag 7 should be less than 25% of lag 0
                                lag_7_check = np.greater(corr_table[7, :], corr_table[0, :] * 0.25)
                                # If either condition is met for any beam the test fails
                                # NOTE(review): the comparison below uses > 1, so a single
                                # exceedance does not flip pt3_fail — confirm this matches
                                # the comment's "either condition" intent.
                                if np.sum(np.sum(all_lag_check)) + np.sum(lag_7_check) > 1:
                                    pt3_fail = True
                # Count test sets reporting at least one failed sub-test
                if test.result['sysTest']['n_failed'] is not None and test.result['sysTest']['n_failed'] > 0:
                    num_tests_with_failure += 1
        # pt3 test failure message
        if pt3_fail:
            self.system_tst['status'] = 'caution'
            self.system_tst['messages'].append(
                ['System Test: One or more PT3 tests in the system test indicate potential EMI;', 2, 3])
        # Check for failed tests
        if num_tests_with_failure == len(meas.system_tst):
            # All tests had a failure
            self.system_tst['status'] = 'warning'
            self.system_tst['messages'].append(
                ['SYSTEM TEST: All system test sets have at least one test that failed;', 1, 3])
        elif num_tests_with_failure > 0:
            self.system_tst['status'] = 'caution'
            self.system_tst['messages'].append(
                ['System Test: One or more system test sets have at least one test that failed;', 2, 3])
def compass_qa(self, meas):
"""Apply QA checks to compass calibration and evaluation.
Parameters
----------
meas: Measurement
Object of class Measurement
"""
self.compass['messages'] = []
checked = []
for transect in meas.transects:
checked.append(transect.checked)
if np.any(checked):
heading = np.unique(meas.transects[checked.index(1)].sensors.heading_deg.internal.data)
else:
heading = np.array([0])
# Initialize variable as if ADCP has no compass
self.compass['status'] = 'inactive'
self.compass['status1'] = 'good'
self.compass['status2'] = 'good'
self.compass['magvar'] = 0
self.compass['magvar_idx'] = []
self.compass['mag_error_idx'] = []
self.compass['pitch_mean_warning_idx'] = []
self.compass['pitch_mean_caution_idx'] = []
self.compass['pitch_std_caution_idx'] = []
self.compass['roll_mean_warning_idx'] = []
self.compass['roll_mean_caution_idx'] = []
self.compass['roll_std_caution_idx'] = []
if len(heading) > 1 and np.any(np.not_equal(heading, 0)):
# ADCP has a compass
# A compass calibration is required if a loop test or GPS are used
# Check for loop test
loop = False
for test in meas.mb_tests:
if test.type == 'Loop':
loop = True
# Check for GPS data
gps = False
if meas.transects[checked.index(True)].boat_vel.gga_vel is not None or \
meas.transects[checked.index(True)].boat_vel.vtg_vel is not None:
gps = True
if gps or loop:
# Compass calibration is required
# Determine the ADCP manufacturer
if meas.transects[checked.index(True)].adcp.manufacturer == 'SonTek':
# SonTek ADCP
if len(meas.compass_cal) == 0:
# No compass calibration
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration;', 1, 4])
elif meas.compass_cal[-1].result['compass']['error'] == 'N/A':
# If the error cannot be decoded from the calibration assume the calibration is good
self.compass['status1'] = 'good'
else:
if meas.compass_cal[-1].result['compass']['error'] <= 0.2:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: Calibration result > 0.2 deg;', 2, 4])
elif meas.transects[checked.index(True)].adcp.manufacturer == 'TRDI':
# TRDI ADCP
if len(meas.compass_cal) == 0:
# No compass calibration
if len(meas.compass_eval) == 0:
# No calibration or evaluation
self.compass['status1'] = 'warning'
self.compass['messages'].append(['COMPASS: No compass calibration or evaluation;', 1, 4])
else:
# No calibration but an evaluation was completed
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass calibration;', 2, 4])
else:
# Compass was calibrated
if len(meas.compass_eval) == 0:
# No compass evaluation
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: No compass evaluation;', 2, 4])
else:
# Check results of evaluation
try:
if float(meas.compass_eval[-1].result['compass']['error']) <= 1:
self.compass['status1'] = 'good'
else:
self.compass['status1'] = 'caution'
self.compass['messages'].append(['Compass: Evaluation result > 1 deg;', 2, 4])
except ValueError:
self.compass['status1'] = 'good'
else:
# Compass not required
if len(meas.compass_cal) == 0 and len(meas.compass_eval) == 0:
# No compass calibration or evaluation
self.compass['status1'] = 'default'
else:
# Compass was calibrated and evaluated
self.compass['status1'] = 'good'
# Check for consistent magvar and pitch and roll mean and variation
magvar = []
align = []
mag_error_exceeded = []
pitch_mean = []
pitch_std = []
pitch_exceeded = []
roll_mean = []
roll_std = []
roll_exceeded = []
transect_idx = []
for n, transect in enumerate(meas.transects):
if transect.checked:
transect_idx.append(n)
heading_source_selected = getattr(
transect.sensors.heading_deg, transect.sensors.heading_deg.selected)
pitch_source_selected = getattr(transect.sensors.pitch_deg, transect.sensors.pitch_deg.selected)
roll_source_selected = getattr(transect.sensors.roll_deg, transect.sensors.roll_deg.selected)
magvar.append(transect.sensors.heading_deg.internal.mag_var_deg)
if transect.sensors.heading_deg.external is not None:
align.append(transect.sensors.heading_deg.external.align_correction_deg)
pitch_mean.append(np.nanmean(pitch_source_selected.data))
pitch_std.append(np.nanstd(pitch_source_selected.data, ddof=1))
roll_mean.append(np.nanmean(roll_source_selected.data))
roll_std.append(np.nanstd(roll_source_selected.data, ddof=1))
# SonTek G3 compass provides pitch, roll, and magnetic error parameters that can be checked
if transect.adcp.manufacturer == 'SonTek':
if heading_source_selected.pitch_limit is not None:
# Check for bug in SonTek data where pitch and roll was n x 3 use n x 1
if len(pitch_source_selected.data.shape) == 1:
pitch_data = pitch_source_selected.data
else:
pitch_data = pitch_source_selected.data[:, 0]
idx_max = np.where(pitch_data > heading_source_selected.pitch_limit[0])[0]
idx_min = np.where(pitch_data < heading_source_selected.pitch_limit[1])[0]
if len(idx_max) > 0 or len(idx_min) > 0:
pitch_exceeded.append(True)
else:
pitch_exceeded.append(False)
if heading_source_selected.roll_limit is not None:
if len(roll_source_selected.data.shape) == 1:
roll_data = roll_source_selected.data
else:
roll_data = roll_source_selected.data[:, 0]
idx_max = np.where(roll_data > heading_source_selected.pitch_limit[0])[0]
idx_min = | np.where(roll_data < heading_source_selected.pitch_limit[1]) | numpy.where |
#!/usr/bin/env python
"""
Code to create the batch files for fitting
"""
from __future__ import print_function
import os
import glob
import argparse
import tables
#####
import numpy as np
from astropy.table import Table
from astropy.io import fits
#####
def setup_batch_beast_fit(projectname,
datafile,
num_percore=5,
nice=None,
overwrite_logfile=True,
prefix=None):
"""
Sets up batch files for submission to the 'at' queue on
linux (or similar) systems
Parameters
----------
project : string
project name to use (basename for files)
datafile : string
file with the observed data (FITS file) - the observed photometry,
not the sub-files
num_percore : int (default = 5)
number of fitting runs per core
nice : int (default = None)
set this to an integer (-20 to 20) to prepend a "nice" level
to the fitting command
overwrite_logfile : boolean (default = True)
if True, will overwrite the log file; if False, will append to
existing log file
prefix : string (default=None)
Set this to a string (such as 'source activate astroconda') to prepend
to each batch file (use '\n's to make multiple lines)
Returns
-------
run_info_dict : dict
Dictionary indicating which catalog files have complete modeling, and
which job files need to be run
"""
project = projectname
cat_files = np.array(sorted(glob.glob(datafile.replace('.fits',
'*_sub*.fits'))))
datafile_basename = datafile.split('/')[-1].replace('.fits', '')
n_cat_files = len(cat_files)
n_pernode_files = num_percore
# setup the subdirectory for the batch and log files
job_path = project+'/fit_batch_jobs/'
if not os.path.isdir(job_path):
os.mkdir(job_path)
log_path = job_path+'logs/'
if not os.path.isdir(log_path):
os.mkdir(log_path)
pf_open = False
cur_f = 0
cur_total_size = 0.0
j = -1
# keep track of which files are done running
run_info_dict = {'cat_file': cat_files,
'done': np.full(n_cat_files, False),
'files_to_run': []}
# cat_files = cat_files[0:2]
for i, cat_file in enumerate(cat_files):
# get the sd number
dpos = cat_file.find('SD_')
spos = cat_file.find('sub')
ppos = cat_file.rfind('.')
sd_num = cat_file[dpos+3:spos-1]
sub_num = cat_file[spos+3:ppos]
# read the stats file and see if this subregion is done yet
# results_path = project + '/'
basename = "%s/%s_sd%s_sub%s" % (project, project, sd_num, sub_num)
stats_file = basename + '_stats.fits'
pdf1d_file = basename + '_pdf1d.fits'
lnp_file = basename + '_lnp.hd5'
reg_run = False
run_done = False
if not os.path.isfile(stats_file):
reg_run = True
print('no stats file')
if not os.path.isfile(pdf1d_file):
reg_run = True
print('no pdf1d file')
if not os.path.isfile(lnp_file):
reg_run = True
print('no lnp file')
# first check if the pdf1d mass spacing is correct
if not reg_run:
hdulist = fits.open(pdf1d_file)
delta1 = (hdulist['M_ini'].data[-1, 1]
- hdulist['M_ini'].data[-1, 0])
if delta1 > 1.0: # old linear spacing
print('pdf1d lin mass spacing - full refitting needed')
old_mass_spacing = True
else:
old_mass_spacing = False
print('pdf1d log mass spacing - ok')
if old_mass_spacing:
run_done = False
reg_run = True
# now check if the number of results is the same as
# the number of observations
if not reg_run:
# get the observed catalog
obs = Table.read(cat_file)
# get the fit results catalog
t = Table.read(stats_file)
# get the number of stars that have been fit
indxs, = np.where(t['Pmax'] != 0.0)
# get the number of entries in the lnp file
f = tables.open_file(lnp_file, 'r')
nlnp = f.root._v_nchildren - 2
f.close()
print('# obs, stats, lnp = ', len(obs), len(indxs), nlnp)
if (len(indxs) == len(obs)) & (nlnp == len(obs)):
# final check, is the pdf1d file correctly populated
tot_prob = | np.sum(hdulist['M_ini'].data, axis=1) | numpy.sum |
from functools import partial
import numpy as np
from deep_np import layers, losses, utils
def _init_fc_weights(in_dim, out_dim, include_bias=True):
weights = np.random.randn(in_dim, out_dim) / np.sqrt(in_dim / 2.)
if include_bias:
return weights, np.zeros((1, out_dim))
return weights
def _init_conv_weights(n_filters, n_channels, filter_size, include_bias=True):
weights = np.random.randn(n_filters, n_channels, filter_size,
filter_size) / np.sqrt(n_filters / 2.)
if include_bias:
return weights, np.zeros((n_filters, 1))
return weights
class NeuralNetwork(object):
    """Abstract base class: subclasses implement forward() and backward()."""

    def __init__(self, *args, **kwargs):
        pass

    def predict_proba(self, X):
        """Class probabilities: softmax over the forward-pass logits."""
        scores, _ = self.forward(X, train=False)
        return utils.softmax(scores)

    def predict(self, X):
        """Hard class labels: argmax of the predicted probabilities."""
        probs = self.predict_proba(X)
        return np.argmax(probs, axis=1)

    def train_step(self, X_train, y_train):
        """One forward/backward pass; returns (parameter grads, loss)."""
        scores, cache = self.forward(X_train)
        cost = losses.cross_entropy(scores, y_train)
        grads = self.backward(scores, y_train, cache)
        return grads, cost

    def forward(self, X, train=True):
        raise NotImplementedError()

    def backward(self, logits, y_train, cache):
        raise NotImplementedError()
# Network for pong policy gradient
class PongNetwork(NeuralNetwork):
    """Two-layer fully connected policy network (fc -> relu -> fc)."""

    def __init__(self, input_dim, hidden_dim=128, n_cls=3):
        W1, b1 = _init_fc_weights(input_dim, hidden_dim)
        W2, b2 = _init_fc_weights(hidden_dim, n_cls)
        self.model = dict(W1=W1, b1=b1, W2=W2, b2=b2)

    def forward(self, X, train=True):
        """Return (logits, cache) for input batch X."""
        hidden, fc1_cache = layers.fc_forward(X, self.model["W1"], self.model["b1"])
        hidden, relu_cache = layers.relu_forward(hidden)
        logits, fc2_cache = layers.fc_forward(hidden, self.model["W2"],
                                              self.model["b2"])
        return logits, dict(h1=fc1_cache, nl1=relu_cache, logits=fc2_cache)

    # slightly different API to accomodate policy gradient:
    # takes the output gradient directly instead of (logits, y_train)
    def backward(self, grad_y, cache):
        """Backpropagate grad_y through the net; returns dict of param grads."""
        dhidden, dW2, db2 = layers.fc_backward(grad_y, cache["logits"])
        dhidden = layers.relu_backward(dhidden, cache["nl1"])
        _, dW1, db1 = layers.fc_backward(dhidden, cache["h1"])
        return dict(W1=dW1, b1=db1, W2=dW2, b2=db2)
class FeedForwardNetwork(NeuralNetwork):
def __init__(self, in_dim=784, hidden_dim=128, p_dropout=0.7, n_cls=10):
self.p_dropout = p_dropout
W1, b1 = _init_fc_weights(in_dim, hidden_dim)
beta1 = np.ones((1, hidden_dim))
gamma1 = np.ones((1, hidden_dim))
W2, b2 = _init_fc_weights(hidden_dim, hidden_dim)
beta2 = np.ones((1, hidden_dim))
gamma2 = np.ones((1, hidden_dim))
W3, b3 = _init_fc_weights(hidden_dim, n_cls)
self.model = dict(
W1=W1,
b1=b1,
beta1=beta1,
gamma1=gamma1,
W2=W2,
b2=b2,
beta2=beta2,
gamma2=gamma2,
W3=W3,
b3=b3)
self.bn_caches = dict(
b1_mean=np.zeros((1, H)),
b1_var=np.zeros((1, H)),
b2_mean=np.zeros((1, H)),
b2_var= | np.zeros((1, H)) | numpy.zeros |
import open3d
import numpy as np
import cv2
import scipy.io as sio
from transforms3d.quaternions import quat2mat, mat2quat
global cnt
cnt = 0
def visualize(im, depth, label, centers, cls_indexes):
    """Show the image with object centers and bounding boxes, plus the depth
    map and a randomly colorized label mask, in OpenCV windows.
    """
    global cnt
    cnt += 1
    height, width = label.shape
    # Colorize the label mask: one random color per requested class id
    label_img = np.zeros((height, width, 3), dtype=np.uint8)
    for cls_id in cls_indexes:  # np.unique(label):
        label_img[label == cls_id] = np.random.randint(0, 255, size=3)
    # Mark each object center on the image
    for center in centers:
        cv2.circle(im, tuple(center.astype(np.int32)), 3, (0, 255, 0), -1)
    # Draw the per-class bounding boxes
    for box in get_bboxes(label, cls_indexes):
        cv2.rectangle(im, tuple(box[:2]), tuple(box[2:]), (0, 255, 0))
    cv2.imshow('im%d' % (cnt), im)
    cv2.imshow('depth', depth)
    cv2.imshow('label', label_img)
def visualize_pose(im, cls_indexes, poses, points, intrinsic_matrix):
    """Project each object's 3-D model points into the image using its pose
    and the camera intrinsics, and show the result in an OpenCV window.
    """
    overlay = im.copy()
    for idx, cls_id in enumerate(cls_indexes):
        color = np.random.randint(0, 255, size=(3))
        model_pts = points[cls_id]
        # Homogeneous 4xN coordinates of the model points
        homog = np.ones((4, len(model_pts)), dtype=np.float32)
        homog[0, :] = model_pts[:, 0]
        homog[1, :] = model_pts[:, 1]
        homog[2, :] = model_pts[:, 2]
        # Build the 3x4 [R|t] matrix for this object's pose
        RT = np.zeros((3, 4), dtype=np.float32)
        RT[:, :3] = poses[idx][:, :3]
        RT[:, 3] = poses[idx][:, 3]
        print(RT)
        # Project: K @ [R|t] @ X, then perspective divide
        proj = np.matmul(intrinsic_matrix, np.matmul(RT, homog))
        proj[0, :] = np.divide(proj[0, :], proj[2, :])
        proj[1, :] = np.divide(proj[1, :], proj[2, :])
        pix = np.transpose(proj[:2, :], [1, 0]).astype(np.int32)
        for px in pix:
            cv2.circle(overlay, tuple(px), 3, color, -1)
    cv2.imshow("proj_poses%d" % (cnt), overlay)
def get_bboxes(label, cls_indexes):
bboxes = []
for cls in cls_indexes:
y, x = | np.where(label==cls) | numpy.where |
"""
Module containing classes for ray tracing through the ice.
Ray tracer classes correspond to ray trace path classes, where the ray
tracer is responsible for calculating the existence and launch angle of
paths between points, and the ray tracer path objects are responsible for
returning information about propagation along their respective path.
"""
import logging
import numpy as np
import scipy.constants
import scipy.fft
import scipy.optimize
from pyrex.internal_functions import normalize, LazyMutableClass, lazy_property
from pyrex.ice_model import AntarcticIce, UniformIce, ice
logger = logging.getLogger(__name__)
class BasicRayTracePath(LazyMutableClass):
    """
    Class for representing a single ray-trace solution between points.
    Stores parameters of the ray path with calculations performed by
    integrating z-steps of size ``dz``. Most properties are lazily evaluated
    to save on computation time. If any attributes of the class instance are
    changed, the lazily-evaluated properties will be cleared.
    Parameters
    ----------
    parent_tracer : BasicRayTracer
        Ray tracer for which this path is a solution.
    launch_angle : float
        Launch angle (radians) of the ray path.
    direct : boolean
        Whether the ray path is direct. If ``True`` this means the path does
        not "turn over". If ``False`` then the path does "turn over" by either
        reflection or refraction after reaching some maximum depth.
    Attributes
    ----------
    from_point : ndarray
        The starting point of the ray path.
    to_point : ndarray
        The ending point of the ray path.
    theta0 : float
        The launch angle of the ray path at `from_point`.
    ice
        The ice model used for the ray tracer.
    dz : float
        The z-step (m) to be used for integration of the ray path attributes.
    direct : boolean
        Whether the ray path is direct. If ``True`` this means the path does
        not "turn over". If ``False`` then the path does "turn over" by either
        reflection or refraction after reaching some maximum depth.
    emitted_direction
    received_direction
    path_length
    tof
    coordinates
    See Also
    --------
    pyrex.internal_functions.LazyMutableClass : Class with lazy properties
                                                which may depend on other class
                                                attributes.
    BasicRayTracer : Class for calculating the ray-trace solutions between
                     points.
    Notes
    -----
    Even more attributes than those listed are available for the class, but
    are mainly for internal use. These attributes can be found by exploring
    the source code.
    """
    def __init__(self, parent_tracer, launch_angle, direct):
        # Endpoints, ice model, and step size are shared with the parent
        # tracer so every solution path uses consistent parameters.
        self.from_point = parent_tracer.from_point
        self.to_point = parent_tracer.to_point
        self.theta0 = launch_angle
        self.ice = parent_tracer.ice
        self.dz = parent_tracer.dz
        self.direct = direct
        # LazyMutableClass.__init__ sets up clearing of lazy properties on
        # attribute changes.
        super().__init__()
    @property
    def _metadata(self):
        """Metadata dictionary for writing `BasicRayTracePath` information."""
        # Launch/receiving angles are recovered from the z-components of the
        # emitted/received unit direction vectors.
        return {
            "n0": self.n0,
            "dz": self.dz,
            "emitted_x": self.emitted_direction[0],
            "emitted_y": self.emitted_direction[1],
            "emitted_z": self.emitted_direction[2],
            "received_x": self.received_direction[0],
            "received_y": self.received_direction[1],
            "received_z": self.received_direction[2],
            "launch_angle": np.arccos(self.emitted_direction[2]),
            "receiving_angle": np.pi-np.arccos(self.received_direction[2]),
            "path_length": self.path_length,
            "tof": self.tof
        }
    @property
    def z_turn_proximity(self):
        """
        Parameter for how closely path approaches z_turn.
        Necessary to avoid diverging integrals which occur at z_turn.
        """
        # Best value of dz/10 determined empirically by checking errors
        return self.dz/10
    @property
    def z0(self):
        """Depth (m) of the launching point."""
        return self.from_point[2]
    @property
    def z1(self):
        """Depth (m) of the receiving point."""
        return self.to_point[2]
    @lazy_property
    def n0(self):
        """Index of refraction of the ice at the launching point."""
        return self.ice.index(self.z0)
    @lazy_property
    def rho(self):
        """Radial distance (m) between the endpoints."""
        u = self.to_point - self.from_point
        return np.sqrt(u[0]**2 + u[1]**2)
    @lazy_property
    def phi(self):
        """Azimuthal angle (radians) between the endpoints."""
        u = self.to_point - self.from_point
        return np.arctan2(u[1], u[0])
    @lazy_property
    def beta(self):
        """Launching beta parameter (n(z0) * sin(theta0))."""
        # Conserved along the path (Snell's-law invariant for n varying in z).
        return self.n0 * np.sin(self.theta0)
    @lazy_property
    def z_turn(self):
        """Turning depth (m) of the path."""
        # Depth where the local index equals beta, i.e. where the ray
        # becomes horizontal.
        return self.ice.depth_with_index(self.beta)
    # @property
    # def exists(self):
    #     """Boolean of whether the path between the points with the
    #     given launch angle exists."""
    #     return True
    @lazy_property
    def emitted_direction(self):
        """Direction in which ray is emitted."""
        return np.array([np.sin(self.theta0) * np.cos(self.phi),
                         np.sin(self.theta0) * np.sin(self.phi),
                         np.cos(self.theta0)])
    @lazy_property
    def received_direction(self):
        """Direction ray is travelling when it is received."""
        if self.direct:
            # Direct paths keep the vertical sense of the launch direction.
            sign = np.sign(np.cos(self.theta0))
            return np.array([np.sin(self.theta(self.z1)) * np.cos(self.phi),
                             np.sin(self.theta(self.z1)) * np.sin(self.phi),
                             sign*np.cos(self.theta(self.z1))])
        else:
            # Paths that turned over arrive travelling downward (-z).
            return np.array([np.sin(self.theta(self.z1)) * np.cos(self.phi),
                             np.sin(self.theta(self.z1)) * np.sin(self.phi),
                             -np.cos(self.theta(self.z1))])
    def theta(self, z):
        """
        Polar angle of the ray at the given depths.
        Calculates the polar angle of the ray's direction at the given depth
        in the ice. Note that the ray could be travelling upward or downward
        at this polar angle.
        Parameters
        ----------
        z : array_like
            (Negative-valued) depths (m) in the ice.
        Returns
        -------
        array_like
            Polar angle at the given values of `z`.
        """
        # Snell's law: n(z0) sin(theta0) = n(z) sin(theta(z)).
        return np.arcsin(np.sin(self.theta0) * self.n0/self.ice.index(z))
    # Log-scaled zs (commented out below and in z_integral method) seemed
    # like a good idea for reducing dimensionality, but didn't work out.
    # Kept here in case it works out better in the future
    # @lazy_property
    # def dn(self):
    #     return np.abs(self.ice.gradient(-10)[2])*self.dz
    # def _log_scale_zs(self, z0, z1):
    #     # Base dn on dz at 10 meter depth
    #     n0 = self.ice.index(z0)
    #     n1 = self.ice.index(z1)
    #     n_steps = int(np.abs(n1-n0)/self.dn)
    #     ns = np.linspace(n0, n1, n_steps+2)
    #     return self.ice.depth_with_index(ns)
    def z_integral(self, integrand):
        """
        Calculate the numerical integral of the given integrand.
        For the integrand as a function of z, the numerical integral is
        calculated along the ray path.
        Parameters
        ----------
        integrand : function
            Function returning the values of the integrand at a given array of
            values for the depth z.
        Returns
        -------
        float
            The value of the numerical integral along the ray path.
        """
        if self.direct:
            # Single trapezoidal integration from z0 to z1.
            n_zs = int(np.abs(self.z1-self.z0)/self.dz)
            zs, dz = np.linspace(self.z0, self.z1, n_zs+1, retstep=True)
            return np.trapz(integrand(zs), dx=np.abs(dz), axis=0)
            # zs = self._log_scale_zs(self.z0, self.z1)
            # return np.trapz(integrand(zs), x=zs, axis=0)
        else:
            # Indirect paths are split into two legs which both stop
            # z_turn_proximity short of the turning depth, where the
            # integrand diverges.
            n_zs_1 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z0)/self.dz)
            zs_1, dz_1 = np.linspace(self.z0, self.z_turn-self.z_turn_proximity,
                                     n_zs_1+1, retstep=True)
            n_zs_2 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z1)/self.dz)
            zs_2, dz_2 = np.linspace(self.z_turn-self.z_turn_proximity, self.z1,
                                     n_zs_2+1, retstep=True)
            return (np.trapz(integrand(zs_1), dx=np.abs(dz_1), axis=0) +
                    np.trapz(integrand(zs_2), dx=np.abs(dz_2), axis=0))
            # zs_1 = self._log_scale_zs(self.z0, self.z_turn-self.z_turn_proximity)
            # zs_2 = self._log_scale_zs(self.z1, self.z_turn-self.z_turn_proximity)
            # return (np.trapz(integrand(zs_1), x=zs_1, axis=0) +
            #         np.trapz(integrand(zs_2), x=zs_2, axis=0))
    @lazy_property
    def path_length(self):
        """Length (m) of the ray path."""
        # Path element ds = dz / cos(theta).
        return self.z_integral(lambda z: 1/np.cos(self.theta(z)))
    @lazy_property
    def tof(self):
        """Time of flight (s) along the ray path."""
        # Integrate n/c along the path (local phase velocity c/n).
        return self.z_integral(lambda z: self.ice.index(z) / scipy.constants.c
                               / np.cos(self.theta(z)))
    @lazy_property
    def fresnel(self):
        """
        Fresnel factors for reflection off the ice surface.
        The fresnel reflectance calculated is the square root (ratio of
        amplitudes, not powers) for reflection off ice surface (1 if doesn't
        reach surface). Stores the s and p polarized reflectances, respectively.
        """
        # Direct paths, and paths which turn over below the top of the valid
        # ice range, never touch the surface: no reflection loss.
        if self.direct or self.z_turn<self.ice.valid_range[1]:
            return 1, 1
        else:
            n_1 = self.ice.index(self.ice.valid_range[1])
            n_2 = self.ice.index_above
            theta_1 = self.theta(self.ice.valid_range[1])
            cos_1 = np.cos(theta_1)
            sin_2 = n_1/n_2*np.sin(theta_1)
            if sin_2<=1:
                # Plain reflection with real coefficients
                cos_2 = np.sqrt(1 - (sin_2)**2)
            else:
                # Total internal reflection off the surface, results in complex
                # fresnel factors encoding the phase data
                cos_2 = np.sqrt((sin_2)**2 - 1)*1j
            # TODO: Confirm sign convention here
            r_s = (n_1*cos_1 - n_2*cos_2) / (n_1*cos_1 + n_2*cos_2)
            r_p = (n_2*cos_1 - n_1*cos_2) / (n_2*cos_1 + n_1*cos_2)
            return r_s, r_p
    def attenuation(self, f):
        """
        Calculate the attenuation factor for signal frequencies.
        Calculates the attenuation factor to be multiplied by the signal
        amplitude at the given frequencies.
        Parameters
        ----------
        f : array_like
            Frequencies (Hz) at which to calculate signal attenuation.
        Returns
        -------
        array_like
            Attenuation factors for the signal at the frequencies `f`.
        """
        fa = np.abs(f)
        def integrand(z):
            # Path-length element per unit z, divided by the (possibly
            # frequency-dependent) attenuation length. The transposes let the
            # division broadcast over an array of frequencies.
            partial_integrand = 1 / np.cos(self.theta(z))
            alen = self.ice.attenuation_length(z, fa)
            return (partial_integrand / alen.T).T
        return np.exp(-np.abs(self.z_integral(integrand)))
    def propagate(self, signal=None, polarization=None,
                  attenuation_interpolation=None):
        """
        Propagate the signal with optional polarization along the ray path.
        Applies the frequency-dependent signal attenuation along the ray path
        and shifts the times according to the ray time of flight. Additionally
        provides the s and p polarization directions.
        Parameters
        ----------
        signal : Signal, optional
            ``Signal`` object to propagate.
        polarization : array_like, optional
            Vector representing the linear polarization of the `signal`.
        attenuation_interpolation: float, optional
            Logarithmic (base 10) interpolation step to be used for
            interpolating attenuation along the ray path. If `None`, no
            interpolation is applied and the attenuation is pre-calculated at
            the expected signal frequencies.
        Returns
        -------
        tuple of Signal
            Tuple of ``Signal`` objects representing the s and p polarizations
            of the original `signal` attenuated along the ray path. Only
            returned if `signal` was not ``None``.
        tuple of ndarray
            Tuple of polarization vectors representing the s and p polarization
            directions of the `signal` at the end of the ray path. Only
            returned if `polarization` was not ``None``.
        See Also
        --------
        pyrex.Signal : Base class for time-domain signals.
        """
        if polarization is None:
            if signal is None:
                return
            else:
                new_signal = signal.copy()
                new_signal.shift(self.tof)
                # Pre-calculate attenuation at the designated frequencies to
                # save on heavy computation time of the attenuation method
                freqs = scipy.fft.fftfreq(2*len(signal.times), d=signal.dt)
                if attenuation_interpolation is None:
                    freqs.sort()
                else:
                    # Build a symmetric, log-spaced frequency grid (plus f=0)
                    # on which to sample the attenuation for interpolation.
                    logf_min = np.log10(np.min(freqs[freqs>0]))
                    logf_max = np.log10(np.max(freqs))
                    n_steps = int((logf_max - logf_min)
                                  / attenuation_interpolation)
                    if (logf_max-logf_min)%attenuation_interpolation:
                        n_steps += 1
                    logf = np.logspace(logf_min, logf_max, n_steps+1)
                    freqs = np.concatenate((-np.flipud(logf), [0], logf))
                atten_vals = self.attenuation(freqs)
                attenuation = lambda f: np.interp(f, freqs, atten_vals)
                new_signal.filter_frequencies(attenuation)
                return new_signal
        else:
            # Unit vectors perpendicular and parallel to plane of incidence
            # at the launching point
            u_s0 = normalize(np.cross(self.emitted_direction, [0, 0, 1]))
            u_p0 = normalize(np.cross(u_s0, self.emitted_direction))
            # Unit vector parallel to plane of incidence at the receiving point
            # (perpendicular vector stays the same)
            u_p1 = normalize(np.cross(u_s0, self.received_direction))
            if signal is None:
                return (u_s0, u_p1)
            else:
                # Amplitudes of s and p components
                pol_s = np.dot(polarization, u_s0)
                pol_p = np.dot(polarization, u_p0)
                # Fresnel reflectances of s and p components
                r_s, r_p = self.fresnel
                # Pre-calculate attenuation at the designated frequencies to
                # save on heavy computation time of the attenuation method
                freqs = scipy.fft.fftfreq(2*len(signal.times), d=signal.dt)
                if attenuation_interpolation is None:
                    freqs.sort()
                else:
                    # Same symmetric log-spaced frequency grid as above.
                    logf_min = np.log10(np.min(freqs[freqs>0]))
                    logf_max = np.log10(np.max(freqs))
                    n_steps = int((logf_max - logf_min)
                                  / attenuation_interpolation)
                    if (logf_max-logf_min)%attenuation_interpolation:
                        n_steps += 1
                    logf = np.logspace(logf_min, logf_max, n_steps+1)
                    freqs = np.concatenate((-np.flipud(logf), [0], logf))
                atten_vals = self.attenuation(freqs)
                # Apply fresnel s and p coefficients in addition to attenuation
                attenuation_s = lambda f: np.interp(f, freqs, atten_vals) * r_s
                attenuation_p = lambda f: np.interp(f, freqs, atten_vals) * r_p
                signal_s = signal * pol_s
                signal_p = signal * pol_p
                signal_s.shift(self.tof)
                signal_p.shift(self.tof)
                signal_s.filter_frequencies(attenuation_s, force_real=True)
                signal_p.filter_frequencies(attenuation_p, force_real=True)
                return (signal_s, signal_p), (u_s0, u_p1)
    @lazy_property
    def coordinates(self):
        """
        x, y, and z-coordinates along the path (using dz step).
        Coordinates are provided for plotting purposes only, and are not vetted
        for use in calculations.
        """
        # The radial coordinate r(z) is the cumulative trapezoidal integral
        # of tan(theta(z)), built manually so intermediate values are kept.
        if self.direct:
            n_zs = int(np.abs(self.z1-self.z0)/self.dz)
            zs, dz = np.linspace(self.z0, self.z1, n_zs+1, retstep=True)
            integrand = np.tan(self.theta(zs))
            rs = np.zeros(len(integrand))
            trap_areas = (integrand[:-1] + np.diff(integrand)/2) * dz
            rs[1:] += np.abs(np.cumsum(trap_areas))
        else:
            # Two legs for paths that turn over, joined at the turning depth
            # (offset by z_turn_proximity to avoid the divergence there).
            n_zs_1 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z0) /
                         self.dz)
            zs_1, dz_1 = np.linspace(self.z0, self.z_turn-self.z_turn_proximity,
                                     n_zs_1+1, retstep=True)
            integrand_1 = np.tan(self.theta(zs_1))
            n_zs_2 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z1) /
                         self.dz)
            zs_2, dz_2 = np.linspace(self.z_turn-self.z_turn_proximity, self.z1,
                                     n_zs_2+1, retstep=True)
            integrand_2 = np.tan(self.theta(zs_2))
            rs_1 = np.zeros(len(integrand_1))
            trap_areas = ((integrand_1[:-1] + np.diff(integrand_1)/2) *
                          np.abs(dz_1))
            rs_1[1:] += np.cumsum(trap_areas)
            # Second leg continues outward from the end of the first leg.
            rs_2 = np.zeros(len(integrand_2)) + rs_1[-1]
            trap_areas = ((integrand_2[:-1] + np.diff(integrand_2)/2) *
                          np.abs(dz_2))
            rs_2[1:] += np.cumsum(trap_areas)
            rs = np.concatenate((rs_1, rs_2[1:]))
            zs = np.concatenate((zs_1, zs_2[1:]))
        # Convert radial distances to x/y using the fixed azimuthal angle.
        xs = self.from_point[0] + rs*np.cos(self.phi)
        ys = self.from_point[1] + rs*np.sin(self.phi)
        return xs, ys, zs
class SpecializedRayTracePath(BasicRayTracePath):
"""
Class for representing a single ray-trace solution between points.
Stores parameters of the ray path with calculations performed analytically
(with the exception of attenuation). These calculations require the index
of refraction of the ice to be of the form n(z)=n0-k*exp(a*z). However this
restriction allows for most of the integrations to be performed
analytically. The attenuation is the only attribute which is still
calculated by numerical integration with z-steps of size ``dz``. Most
properties are lazily evaluated to save on computation time. If any
attributes of the class instance are changed, the lazily-evaluated
properties will be cleared.
Parameters
----------
parent_tracer : SpecializedRayTracer
Ray tracer for which this path is a solution.
launch_angle : float
Launch angle (radians) of the ray path.
direct : boolean
Whether the ray path is direct. If ``True`` this means the path does
not "turn over". If ``False`` then the path does "turn over" by either
reflection or refraction after reaching some maximum depth.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
theta0 : float
The launch angle of the ray path at `from_point`.
ice
The ice model used for the ray tracer.
dz : float
The z-step (m) to be used for integration of the ray path attributes.
direct : boolean
Whether the ray path is direct. If ``True`` this means the path does
not "turn over". If ``False`` then the path does "turn over" by either
reflection or refraction after reaching some maximum depth.
uniformity_factor : float
Factor (<1) of the base index of refraction (n0 in the ice model)
beyond which calculations start to break down numerically.
beta_tolerance : float
``beta`` value (near 0) below which calculations start to break down
numerically.
emitted_direction
received_direction
path_length
tof
coordinates
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
SpecializedRayTracer : Class for calculating the ray-trace solutions
between points.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
The requirement that the ice model go as n(z)=n0-k*exp(a*z) is implemented
by requiring the ice model to inherit from `AntarcticIce`. Obviously this
is not fool-proof, but likely the ray tracing will obviously fail if the
index follows a very different functional form.
"""
# Factor of index of refraction at which calculations may break down
uniformity_factor = 0.99999
# Beta value below which calculations may break down
beta_tolerance = 0.005
@lazy_property
def valid_ice_model(self):
"""Whether the ice model being used supports this specialization."""
return ((isinstance(self.ice, type) and
issubclass(self.ice, AntarcticIce))
or isinstance(self.ice, AntarcticIce))
@lazy_property
def z_uniform(self):
"""
Depth (m) beyond which the ice should be treated as uniform.
Calculated based on the ``uniformity_factor``. Necessary due to
numerical rounding issues at indices close to the index limit.
"""
return self.ice.depth_with_index(self.ice.n0 * self.uniformity_factor)
    @staticmethod
    def _z_int_uniform_correction(z0, z1, z_uniform, beta, ice, integrand,
                                  integrand_kwargs={}, numerical=False, dz=None,
                                  derivative_special_case=False):
        """
        Function to perform a z-integration with a uniform ice correction.
        Can be an analytic or numerical integration. Takes into account the
        effect of treating the ice as uniform beyond some depth.
        Parameters
        ----------
        z0 : float
            (Negative-valued) depth (m) of the left limit of the integral.
        z1 : float
            (Negative-valued) depth (m) of the right limit of the integral.
        z_uniform : float
            (Negative-valued) depth (m) below which the ice is assumed to have
            a uniform index.
        beta : float
            ``beta`` value of the ray path.
        ice
            Ice model to be used for ray tracing.
        integrand : function
            Function returning the values of the integrand at a given array of
            values for the depth z.
        integrand_kwargs : dict, optional
            A dictionary of keyword arguments to be passed into the `integrand`
            function.
        numerical : boolean, optional
            Whether to use the numerical integral instead of an analytic one.
            If ``False`` the analytic integral is calculated. If ``True`` the
            numerical integral is calculated.
        dz : float, optional
            The z-step to use for numerical integration. Only needed when
            `numerical` is ``True``.
        derivative_special_case : boolean, optional
            Boolean controlling whether the special case of doing the distance
            integral beta derivative should be used.
        Returns
        -------
        Integral of the given `integrand` along the path from `z0` to `z1`.
        """
        # Suppress numpy RuntimeWarnings
        with np.errstate(divide='ignore', invalid='ignore'):
            if numerical:
                if dz is None:
                    raise ValueError("Argument dz must be specified for "+
                                     "numerical integrals")
                if (z0<z_uniform)==(z1<z_uniform):
                    # z0 and z1 on same side of z_uniform: single numerical
                    # integration with a consistent `deep` flag.
                    n_zs = int(np.abs(z1-z0)/dz)
                    if n_zs<10:
                        n_zs = 10
                    zs = np.linspace(z0, z1, n_zs+1)
                    return integrand(zs, beta=beta, ice=ice, deep=z0<z_uniform,
                                     **integrand_kwargs)
                else:
                    # Limits straddle z_uniform: split into a deep piece and a
                    # shallow piece, each with at least 10 steps.
                    n_zs_1 = int(np.abs(z_uniform-z0)/dz)
                    if n_zs_1<10:
                        n_zs_1 = 10
                    zs_1 = np.linspace(z0, z_uniform, n_zs_1+1)
                    n_zs_2 = int(np.abs(z1-z_uniform)/dz)
                    if n_zs_2<10:
                        n_zs_2 = 10
                    zs_2 = np.linspace(z_uniform, z1, n_zs_2+1)
                    return (integrand(zs_1, beta=beta, ice=ice,
                                      deep=z0<z_uniform,
                                      **integrand_kwargs) +
                            integrand(zs_2, beta=beta, ice=ice,
                                      deep=z1<z_uniform,
                                      **integrand_kwargs))
            # Analytic integrals: `integrand` here returns the indefinite
            # integral evaluated at a single depth.
            int_z0 = integrand(z0, beta, ice, deep=z0<z_uniform,
                               **integrand_kwargs)
            int_z1 = integrand(z1, beta, ice, deep=z1<z_uniform,
                               **integrand_kwargs)
            if not derivative_special_case:
                if (z0<z_uniform)==(z1<z_uniform):
                    # z0 and z1 on same side of z_uniform
                    return int_z1 - int_z0
                else:
                    # Correct for the jump between the deep and non-deep
                    # indefinite integrals at the z_uniform boundary.
                    int_diff = (
                        integrand(z_uniform, beta, ice, deep=True,
                                  **integrand_kwargs) -
                        integrand(z_uniform, beta, ice, deep=False,
                                  **integrand_kwargs)
                    )
                    if z0<z1:
                        # z0 below z_uniform, z1 above z_uniform
                        return int_z1 - int_z0 + int_diff
                    else:
                        # z0 above z_uniform, z1 below z_uniform
                        return int_z1 - int_z0 - int_diff
            else:
                # Deal with special case of doing distance integral beta derivative
                # which includes two bounds instead of just giving indef. integral
                # FIXME: Somewhat inaccurate, should probably be done differently
                z_turn = np.log((ice.n0-beta)/ice.k)/ice.a
                if (z0<z_uniform)==(z1<z_uniform)==(z_turn<z_uniform):
                    # All on same side of z_uniform
                    return int_z0 + int_z1
                else:
                    int_diff = (
                        integrand(z_uniform, beta, ice, deep=True,
                                  **integrand_kwargs) -
                        integrand(z_uniform, beta, ice, deep=False,
                                  **integrand_kwargs)
                    )
                    if (z0<z_uniform)==(z1<z_uniform):
                        # z0 and z1 below z_uniform, but z_turn above
                        return int_z0 + int_z1 - 2*int_diff
                    else:
                        # z0 or z1 below z_uniform, others above
                        return int_z0 + int_z1 - int_diff
def z_integral(self, integrand, integrand_kwargs={}, numerical=False):
"""
Calculate the integral of the given integrand.
For the integrand as a function of z, the analytic or numerical
integral is calculated along the ray path.
Parameters
----------
integrand : function
Function returning the values of the integrand at a given array of
values for the depth z.
integrand_kwargs : dict, optional
A dictionary of keyword arguments to be passed into the `integrand`
function.
numerical : boolean, optional
Whether to use the numerical integral instead of an analytic one.
If ``False`` the analytic integral is calculated. If ``True`` the
numerical integral is calculated.
Returns
-------
float
The value of the integral along the ray path.
Raises
------
TypeError
If the ice model is not valid for the specialized analytic
integrations.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must inherit methods from "+
"pyrex.AntarcticIce")
if self.direct:
return self._z_int_uniform_correction(self.z0, self.z1,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
else:
int_1 = self._z_int_uniform_correction(self.z0, self.z_turn,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
int_2 = self._z_int_uniform_correction(self.z1, self.z_turn,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
return int_1 + int_2
@staticmethod
def _int_terms(z, beta, ice):
"""
Useful pre-calculated substitutions for integrations.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
Returns
-------
alpha : float
``n0``^2 - `beta`^2
n_z : float
Index at depth `z`.
gamma : float
`n_z`^2 - `beta`^2
log_term_1 : float
``n0``*`n_z` - `beta`^2 - sqrt(`alpha`*`gamma`)
log_term_2 : float
`n_z` + sqrt(`gamma`)
"""
alpha = ice.n0**2 - beta**2
n_z = ice.n0 - ice.k*np.exp(ice.a*z)
gamma = n_z**2 - beta**2
# Prevent errors when gamma is a very small negative number due to
# numerical rounding errors. This could cause other problems for cases
# where a not-tiny negative gamma would have meant nans but now leads to
# non-nan values. It appears this only occurs when the launch angle
# is greater than the maximum value allowed in the ray tracer however,
# so it's likely alright. If problems arise, replace with gamma<0 and
# np.isclose(gamma, 0) or similar
gamma = np.where(gamma<0, 0, gamma)
log_term_1 = ice.n0*n_z - beta**2 - np.sqrt(alpha*gamma)
log_term_2 = -n_z - np.sqrt(gamma)
return alpha, n_z, gamma, log_term_1, -log_term_2
@classmethod
def _distance_integral(cls, z, beta, ice, deep=False):
"""
Indefinite z-integral for calculating radial distance.
Calculates the indefinite z-integral of tan(arcsin(beta/n(z))), which
between two z values gives the radial distance of the direct path
between the z values.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the indefinite integral at `z`.
"""
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
if deep:
return beta * z / np.sqrt(alpha)
else:
return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
0,
beta / np.sqrt(alpha) * (-z + np.log(log_1)/ice.a))
@classmethod
def _distance_integral_derivative(cls, z, beta, ice, deep=False):
"""
Beta derivative of indefinite z-integral for radial distance.
Calculates the beta derivative of the indefinite z-integral of
tan(arcsin(beta/n(z))), which is used for finding the maximum distance
integral value as a function of launch angle. This function actually
gives the integral from z to the turning point ``z_turn``, since that
is what's needed for finding the peak angle.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the indefinite integral derivative at `z`.
"""
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
z_turn = np.log((ice.n0-beta)/ice.k)/ice.a
if deep:
if z_turn<ice.valid_range[1]:
return ((np.log((ice.n0-beta)/ice.k)/ice.a - z -
beta/(ice.a*(ice.n0-beta))) / np.sqrt(alpha))
else:
return -z / np.sqrt(alpha)
else:
if z_turn<ice.valid_range[1]:
term_1 = ((1+beta**2/alpha)/np.sqrt(alpha) *
(z + np.log(beta*ice.k/log_1) / ice.a))
term_2 = -(beta**2+ice.n0*n_z) / (ice.a*alpha* | np.sqrt(gamma) | numpy.sqrt |
import os, sys
import numpy as np
from . import fishnet as fn
from osgeo import gdal, osr, ogr
class NHGtools(object):
"""
Parameters
-----------------------
ext: dictionary of four keys and values for 'll':lowerleft,
'lr':lowerright, 'ur': upper right, 'ul': upper left'
of desired extent
fc: string, output file name
fctype: string, output format, may be 'gpkg' or 'shp'
fac: float, mult or division factor to resize grid
"""
    def __init__(self, ext=None, fac=1, fctype='gpkg',
                 fc='natlGrid1km'):
        # Load the fixed National Hydrogeologic Grid (NHG) definition first so
        # its extent can serve as the default when no extent is supplied.
        self.NHGextent()
        self.fctype = fctype
        self.fac = fac
        self.fc = fc
        # Earlier Albers projection definitions, kept for reference:
        # self.__proj ='+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23.0 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'
        # self.__proj ='+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23.0 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +towgs84=1,1,-1,0,0,0,0 +units=m +no_defs'
        # Active projection (proj4 string) and the matching WKT used when
        # writing feature classes/rasters.
        self.__proj ='+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=23.0 +lon_0=-96 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs'
        self.__projWkt = 'PROJCS["Albers NHG", \
            GEOGCS["GCS_WGS_1984", \
            DATUM["D_WGS_1984", \
            SPHEROID["WGS_1984",6378137,298.257223563, \
            AUTHORITY["EPSG","7030"]], \
            TOWGS84[0,0,0,0,0,0,0], \
            AUTHORITY["EPSG","6326"]], \
            PRIMEM["Greenwich",0, \
            AUTHORITY["EPSG","8901"]], \
            UNIT["degree",0.0174532925199433, \
            AUTHORITY["EPSG","9122"]], \
            AUTHORITY["EPSG","4326"]], \
            PROJECTION["Albers_Conic_Equal_Area"], \
            PARAMETER["standard_parallel_1",29.5], \
            PARAMETER["standard_parallel_2",45.5], \
            PARAMETER["latitude_of_origin",23], \
            PARAMETER["central_meridian",-96], \
            PARAMETER["false_easting",0], \
            PARAMETER["false_northing",0], \
            UNIT["Meter",1]]'
        # assign defaults: start from the full national grid dimensions;
        # fit2national() later overwrites these for a custom extent/factor.
        self.__cellsize = self.__natCellsize
        self.__icol = self.__ngcols
        self.__irow = self.__ngrows
        if ext != None:
            self.ext = ext
        else:
            self.ext = self.__natlExt
        self.__newext = self.__natlExt
def NHGextent(self):
# coordinates of each corner of the NHG
self.__natlExt = {'ll': [-2553045.0, -92715.0],
'lr': [2426955.0, -92715.0],
'ur': [2426955.0, 3907285.0],
'ul': [-2553045.0, 3907285.0]}
self.__ngrows = 4000
self.__ngcols = 4980
self.__natCellsize = 1000
def customPolyGrid(self):
#ext, cellsize, icol, irow, grd, proj=5070, fctype='gpkg'):
"""
creates a polygon grid from given spatial location
and dimensions.
can write shapefile, sqlite, or geopackage feature class
"""
self.fit2national()
delr = [self.__cellsize for x in range(self.__irow)]
delc = [self.__cellsize for x in range(self.__icol)]
theta = 0.0
print('new cols and rows', self.__icol, self.__irow)
# irow and icol are ....
fn.mkGrid(self.fc, self.__newext['ll'], delc, delr, self.__ngcolNum,
self.__ngrowNum, theta, self.__projWkt, self.fctype,
ngcols=self.__ngcols)
def NationalPoly(self):
delr = [self.__natCellsize for x in range(self.__ngrows)]
delc = [self.__natCellsize for x in range(self.__ngcols)]
icol = 1
irow = 1
theta = 0.
fn.mkGrid(self.fc, self.__natlExt['ll'], delc, delr,
icol, irow, theta, self.__projWkt, fctype=self.fctype,
ngcols=self.__ngcols)
def fit2national(self):
print('national grid')
print(self.__natlExt)
if isinstance(self.fac, int):
res = self.__natCellsize * self.fac
# if fac != 1:
# raise Exception('expecting factor of 1')
if self.fac != 1:
print('NOTE: row, column, and cell number \
attributes labeled as NHG will not \
currently match NHG')
elif isinstance(self.fac, str):
if self.fac == '1/2':
res = self.__natCellsize / 2
elif self.fac == '1/4':
res = self.__natCellsize / 4
elif self.fac == '1/8':
res = self.__natCellsize / 8
elif self.fac == '1/10':
res = self.__natCellsize / 10
else:
res = 0
print('this aint gonna work')
else:
res = 0
print('this also aint gonna work')
newext = {}
syoff = (self.ext['ll'][1] - self.__natlExt['ll'][1]) / res
y = self.__natlExt['ll'][1] + int(syoff) * res
sxoff = (self.ext['ll'][0] - self.__natlExt['ll'][0]) / res
x = self.__natlExt['ll'][0] + (int(sxoff)) * res
newext['ll'] = [x, y]
syoff = (self.ext['ur'][1] - self.__natlExt['ll'][1]) / res
y = self.__natlExt['ll'][1] + int(syoff) * res + res
sxoff = (self.ext['ur'][0] - self.__natlExt['ll'][0]) / res
x = self.__natlExt['ll'][0] + (int(sxoff) + 1) * res
newext['ur'] = [x, y]
newext['lr'] = [x, newext['ll'][1]]
newext['ul'] = [newext['ll'][0], y]
print('new local extent')
print(newext)
self.__newext = newext
self.__ngcolNum = int(abs(self.__natlExt['ul'][0] - newext['ul'][0]) / res ) + 1
self.__ngrowNum = int(abs(self.__natlExt['ul'][1] - newext['ul'][1]) / res) + 1
self.__cellsize = res
print('starting row and col of national grid')
print(self.__ngrowNum, self.__ngcolNum)
# number of rows and cols of new grid
self.__irow, self.__icol = fn.calcRowCol(self.__newext['ll'], self.__newext['lr'],
self.__newext['ur'], self.__cellsize)
print('number of rows, columns, and cellsize of new grid')
print(self.__irow, self.__icol, self.__cellsize)
def readGrid(self, grid):
g = gdal.Open(grid)
gt = g.GetGeoTransform()
rsize = (g.RasterXSize, g.RasterYSize)
a = g.GetRasterBand(1).ReadAsArray()
return(gt, rsize, a)
def createRaster(self, fc=None, rasterName='natlGrid1km.tif',
raster=None):
"""
new, empty raster
"""
# steal geotransform from existing grid
if raster != None:
gt, rsize, a = self.readGrid(raster)
else:
gt = (self.__newext['ul'][0],
self.__cellsize,
0.0,
self.__newext['ul'][1],
0.0,
-self.__cellsize)
rsize = (self.__icol, self.__irow)
srs = osr.SpatialReference()
srs.ImportFromProj4(self.__proj)
drv = gdal.GetDriverByName('GTiff')
self.__rvds = drv.Create(rasterName, rsize[0],
rsize[1], 1, gdal.GDT_Int32)
self.__rvds.SetGeoTransform(gt)
self.__rvds.SetProjection(srs.ExportToWkt())
self.__rvds.GetRasterBand(1).SetNoDataValue(0)
def rasterizer(self, lyrName='modelgrid', attribute='cellnum',
rasterName='natlGrid1km.tif'):
self.fit2national()
self.createRaster(rasterName=rasterName)
ds = ogr.Open('{}.{}'.format(self.fc, self.fctype))
if lyrName != None:
lyr = ds.GetLayerByName(lyrName)
else:
lyr = ds.GetLayer(0)
if self.fctype == 'shp':
lyr = ds.GetLayer(0)
gdal.RasterizeLayer(self.__rvds, [1], lyr, None, None,
[1], ['ATTRIBUTE={}'.format(attribute)])
print('Rasterizification complete')
self.__rvds = None
def writeBand(self):
self.__rvds.GetRasterBand(1).WriteArray(self.__grid)
# self.__rvds.GetRasterBand(1).SetNoDataValue(np.nan)
self.__rvds = None
print('Raster complete')
def makeCellNumRaster(self):
cells = []
for j in range(self.__irow):
for i in range(self.__icol):
irow = self.__irow - j
icol = i + 1
cellNum = (self.__irow - j - 1) * self.__icol + i + 1
cells.append(cellNum)
cells = np.array(cells)
cells = cells.reshape((self.__irow, self.__icol))
self.__grid = | np.flipud(cells) | numpy.flipud |
#! python3
# -*- coding: utf-8 -*-
# Demonstration of vector norms (L0, L1, L2) using numpy.linalg.norm.
import numpy as np
lis = np.array([4, 6])
#L0 Norm
# Counts the non-zero elements of the vector.
print('norm 0')
print(np.linalg.norm(lis, ord=0))
#L1 Norm
# Simply sums the absolute values of the vector's elements.
# For X=4, Y=6 this gives 4+6.
print('norm 1')
print(np.linalg.norm(lis, ord=1))
#L2 Norm
# Straight-line (Euclidean) distance from the origin to the vector's end
# point, i.e. the actual length of the vector.
print('norm 2')
print(np.linalg.norm(lis, ord=2))
# -*- coding: utf-8 -*-
"""
Various geometry operations of geometric pythonocc primitives for OCC_AirCONICS
Created on Fri Dec 4 11:58:52 2015
@author: pchambers
"""
# Geometry Manipulation libraries:
#import OCC.Core.Bnd
from OCC.Core.Bnd import Bnd_B2d, Bnd_Box
from OCC.Core.AIS import AIS_WireFrame, AIS_Shape
from OCC.Core.Geom import Geom_BezierCurve
from OCC.Core.GeomAPI import (GeomAPI_PointsToBSpline, GeomAPI_IntCS,
GeomAPI_Interpolate)
from OCC.Core.BRepBndLib import brepbndlib_Add
from OCC.Core.TColgp import (TColgp_Array1OfPnt, TColgp_HArray1OfPnt,
TColgp_Array1OfVec)
from OCC.Core.TColStd import TColStd_HArray1OfBoolean
from OCC.Core.BRepOffsetAPI import (BRepOffsetAPI_ThruSections,
BRepOffsetAPI_MakePipeShell)
from OCC.Core.BRepBuilderAPI import (BRepBuilderAPI_MakeWire,
BRepBuilderAPI_MakeEdge,
BRepBuilderAPI_Transform,
BRepBuilderAPI_MakeFace,
BRepBuilderAPI_GTransform,
BRepBuilderAPI_MakeVertex)
from OCC.Core.BRepPrimAPI import (BRepPrimAPI_MakeBox, BRepPrimAPI_MakeCone,
BRepPrimAPI_MakeHalfSpace,
BRepPrimAPI_MakeSphere)
from OCC.Core.BRepAlgoAPI import BRepAlgoAPI_Section, BRepAlgoAPI_Cut
from OCC.Core.gp import (gp_Trsf, gp_Ax2, gp_Pnt, gp_Dir, gp_Vec, gp_Pln,
gp_GTrsf, gp_Mat, gp_XYZ)
from OCC.Core.GeomAbs import GeomAbs_C2
from OCC.Core.TopoDS import TopoDS_Shape, topods_Vertex, topods_Face, topods_Edge
from OCC.Core.TopAbs import TopAbs_EDGE, TopAbs_FACE, TopAbs_VERTEX
from OCC.Core.TopExp import TopExp_Explorer
from OCC.Core.GC import GC_MakeCircle, GC_MakeSegment
from OCC.Core.Approx import Approx_ChordLength
from OCC.Core.GCPnts import GCPnts_UniformAbscissa
from OCC.Core.GeomAdaptor import GeomAdaptor_Curve, GeomAdaptor_HCurve
from OCC.Core.GeomPlate import (GeomPlate_CurveConstraint,
GeomPlate_BuildPlateSurface,
GeomPlate_MakeApprox)
from OCC.Core.BRepAdaptor import BRepAdaptor_Curve
from OCC.Core.BRepFeat import BRepFeat_SplitShape
from OCC.Core.TopTools import TopTools_ListIteratorOfListOfShape
from OCC.Core.BRepProj import BRepProj_Projection
# FileIO libraries:
from OCC.Core.STEPCAFControl import STEPCAFControl_Writer
from OCC.Core.STEPControl import STEPControl_Writer, STEPControl_AsIs
from OCC.Core.Interface import Interface_Static_SetCVal
from OCC.Core.IFSelect import IFSelect_RetDone
from OCC.Core.TDF import TDF_LabelSequence
from OCC.Core.TCollection import TCollection_ExtendedString
from OCC.Core.TDocStd import TDocStd_Document
from OCC.Core.XCAFApp import XCAFApp_Application
from OCC.Core.XCAFDoc import (XCAFDoc_DocumentTool_ShapeTool,
XCAFDoc_DocumentTool_ColorTool,
XCAFDoc_DocumentTool_LayerTool,
XCAFDoc_DocumentTool_MaterialTool)
from OCCUtils.Construct import make_edge, make_face, make_pipe, make_wire
# Standard Python libraries
#from six.moves import range
import numpy as np
def coerce_handle(obj):
    '''
    Return the OCC handle for *obj*.

    Historically this called a GetHandle() method on the object; with the
    modern pythonocc API objects and handles are interchangeable, and the
    original try/except executed the identical statement in both branches.
    The input is therefore returned unchanged; the function is kept for
    backward compatibility with existing call sites.
    '''
    return obj
class assert_isdone(object):
    '''
    Context manager that raises an AssertionError carrying *error_statement*
    when to_check.IsDone() returns false.
    -> this is from the pythonocc-utils utility-may not use it?
    '''

    def __init__(self, to_check, error_statement):
        self.to_check = to_check
        self.error_statement = error_statement

    def __enter__(self):
        # Guard clause: fail fast when the wrapped OCC algorithm reports
        # it did not complete successfully.
        if not self.to_check.IsDone():
            raise AssertionError(self.error_statement)

    def __exit__(self, type, value, traceback):
        pass
# TODO: Add TE function (required for creating tip face)
# def AddTEtoOpenAirfoil(Airfoil):
# """If the airfoil curve given as an argument is open at the trailing edge,
# adds a line between the ends of the curve and joins this with the rest
# of the curve. """
# assert(hasattr(Airfoil, 'Curve')), 'Input object does not have a Curve atribute'
#
# handle = Airfoil.Curve
# if not handle.IsClosed():
# try:
# EP = handle.EndPoint()
# SP = handle.StartPoint()
# Closure = gp_Lin(Ep, SP)
# shapeconstruct
# else:
# print("Curve is already closed")
#
# assert(handle.IsClosed()), "Failed to Add Trailing Edge"
#
# return None
def ObjectsExtents(breps, tol=1e-6, as_vec=False):
    """Compute the X, Y and Z extents (in the current coordinate system) of
    a shape or list of shapes.

    Parameters
    ----------
    breps : list of TopoDS_Shape (or a single TopoDS_Shape)
        The shapes to be added for bounding box calculation
    tol : float (default=1e-6)
        Tolerance for bounding box calculation
    as_vec : bool (default=False)
        If true, returns minimum and maximum points as a tuple of gp_Vec

    Returns
    -------
    Either the six scalars xmin, ymin, zmin, xmax, ymax, zmax, or — when
    as_vec is True — the pair (gp_Vec(xmin, ymin, zmin),
    gp_Vec(xmax, ymax, zmax)).

    Notes
    -----
    The third argument of brepbndlib_Add requests triangulation, so the
    control points of NURBS curves are not included in the bounds.
    """
    box = Bnd_Box()
    box.SetGap(tol)
    try:
        for shp in breps:
            brepbndlib_Add(shp, box, True)
    except TypeError:
        # not iterable: treat breps as a single shape
        brepbndlib_Add(breps, box, True)
    xmin, ymin, zmin, xmax, ymax, zmax = box.Get()
    if as_vec:
        return gp_Vec(xmin, ymin, zmin), gp_Vec(xmax, ymax, zmax)
    return xmin, ymin, zmin, xmax, ymax, zmax
def BBox_FromExtents(xmin, ymin, zmin, xmax, ymax, zmax):
    """Build a wireframe AIS shape of the box spanning the given extents;
    can be used to display a bounding box."""
    box_shape = BRepPrimAPI_MakeBox(gp_Pnt(xmin, ymin, zmin),
                                    gp_Pnt(xmax, ymax, zmax)).Shape()
    ais = AIS_Shape(box_shape)
    ais.SetDisplayMode(AIS_WireFrame)
    return ais
def point_array_to_TColgp_PntArrayType(array, _type=TColgp_Array1OfPnt):
    """Convert an array of points into an OCC TColgp point array.

    Parameters
    ----------
    array : array (Npts x 3), list of [x, y, z], or list of gp_Pnt
        Points to convert. A 3 x Npts numpy array is transposed
        automatically.
    _type : type of TColgp array
        Tested inputs are,
            - TColgp_Array1OfPnt
            - TColgp_HArray1OfPnt
        See Notes for more information

    Returns
    -------
    pt_arr : TColgp_Array1OfPnt (or the requested _type)
        OCC type array of points (1-based indexing, as OCC convention)

    Notes
    -----
    Use TColgp_HArray when interpolating a curve from points with
    GeomAPI_Interpolate. Use TColgp_Array when fitting a curve with
    GeomAPI_PointsToBspline.

    NOTE(review): the bare `except:` below also catches the ValueError
    raised for a wrong-shaped numpy input, silently falling through to the
    gp_Pnt branch (which then fails differently) — confirm intended.
    """
    try:
        dims = np.shape(array)
        # Attempt to transpose if x, y, z points span columns (should span rows)
        if dims[0] == 3:
            array = array.T
        elif dims[1] != 3:
            raise ValueError("Array must have dimension Npnts x 3 (x, y, z)")
        N = np.shape(array)[0]
        pt_arr = _type(1, N)
        # OCC arrays are 1-indexed, hence i + 1
        for i, pt in enumerate(array):
            pt_arr.SetValue(i + 1, gp_Pnt(*pt.tolist()))
    except:
        # Input pnts are likely to be a list of gp_Pnt:
        N = len(array)
        pt_arr = _type(1, N)
        for i, pt in enumerate(array):
            pt_arr.SetValue(i + 1, pt)
    return pt_arr
def points_to_bspline(pnts, deg=3, periodic=False, tangents=None,
                      scale=False, continuity=GeomAbs_C2):
    """
    Fit a bspline curve through points. Originally from pythonocc-utils,
    changed to allow numpy arrays as input.

    Parameters
    ----------
    pnts : list or numpy array
        array of x, y, z points
    deg : integer
        degree of the fitted bspline (used only on the non-periodic,
        no-tangent fitting path)
    periodic : Bool (default=False)
        If true, OCC.GeomAPI_Interpolate will be used instead of the
        GeomAPI_PointsToBspline. Curve tangent vectors can then be
        enforced at the interpolation pnts
    tangents : numpy array (default=None)
        list of [x, y, z] tangent vectors to be specified at points:
        if only 2 tangents are specified, these will be enforced at the
        start and end points, otherwise tangents should have the same
        length as pnts and will be enforced at each point.
    scale : Bool (default=False)
        Will scale the tangents (gives a smoother periodic curve if False)
    continuity : OCC.GeomAbs.GeomAbs_XX type (default C2)
        The order of continuity (C^0, C^1, C^2, G^0, ....)

    Returns
    -------
    crv : OCC.Geom.BSplineCurve
    """
    if not periodic and (tangents is None):
        # Simple fitting path: fixed-degree least-squares style fit
        _type = TColgp_Array1OfPnt
        pnts = point_array_to_TColgp_PntArrayType(pnts, _type)
        # Fit the curve to the point array
        deg_min = deg
        deg_max = deg
        crv = GeomAPI_PointsToBSpline(
            pnts, deg_min, deg_max, continuity).Curve()
    else:
        # Interpolation path: passes exactly through the points, and can
        # enforce tangent vectors and/or periodicity
        _type = TColgp_HArray1OfPnt
        pnts = point_array_to_TColgp_PntArrayType(pnts, _type)
        tol = 0.001
        interp = GeomAPI_Interpolate(pnts, periodic, tol)
        if tangents is not None:
            N = tangents.shape[0]
            if N == 2:
                # Two tangents: enforce at start and end points only
                try:
                    interp.Load(gp_Vec(*tangents[0, :]), gp_Vec(*tangents[1, :]),
                                scale)
                except:
                    # Python 3 issue: using * on numpy array breaks gp_Vec
                    interp.Load(gp_Vec(*tangents[0, :].tolist()),
                                gp_Vec(*tangents[1, :].tolist()),
                                scale)
            else:
                # One tangent per point: build OCC vector array (1-indexed)
                tan_array = TColgp_Array1OfVec(1, N)
                for i in range(1, N + 1):
                    try:
                        tan_array.SetValue(i, gp_Vec(*tangents[i - 1, :]))
                    except TypeError:
                        # Python 3 issue: using * on numpy array breaks gp_Vec
                        tan_array.SetValue(
                            i, gp_Vec(*tangents[i - 1, :].tolist()))

                tan_flags = TColStd_HArray1OfBoolean(1, N)
                tan_flags.Init(True)  # Set all true i.e. enforce all tangents
                interp.Load(tan_array, tan_flags, scale)
        interp.Perform()
        crv = interp.Curve()
    return crv
def points_to_BezierCurve(pnts):
    """
    Create a Bezier curve whose control points are *pnts*.

    Parameters
    ----------
    pnts : array or list
        x, y, z for an array of points. Allowable inputs are numpy arrays
        (with dimensions (Npoints x 3)), python lists with elements
        [xi, yi, zi], or lists of OCC.gp.gp_Pnt objects

    Returns
    -------
    crv : OCC.Geom.Geom_BezierCurve
    """
    control_points = point_array_to_TColgp_PntArrayType(pnts,
                                                        TColgp_Array1OfPnt)
    return Geom_BezierCurve(control_points)
def scale_uniformal(brep, pnt, factor, copy=False):
    '''
    Uniformly scale a brep about a point (from pythonocc-utils).

    Parameters
    ----------
    brep : TopoDS_Shape
        the TopoDS_Shape to scale
    pnt : gp_Pnt
        Origin of scaling
    factor : scalar
        Scaling factor
    copy : bool
        copies to brep if True
    '''
    transform = gp_Trsf()
    transform.SetScale(pnt, factor)
    builder = BRepBuilderAPI_Transform(brep, transform, copy)
    builder.Build()
    return builder.Shape()
def transform_nonuniformal(brep, factors, vec=[0, 0, 0], copy=False):
    """Nonuniformly scale brep with respect to the origin by the x, y, z
    scaling factors provided in 'factors', and translate by vector 'vec'.

    Parameters
    ----------
    factors : List of factors [Fx, Fy, Fz]
        Scaling factors with respect to origin (0,0,0)
    vec : List of x,y,z or gp_Vec
        the translation vector (default is [0,0,0])
    copy : bool
        copies to brep if True

    Notes
    -----
    * Only tested on 3d shapes
    * Assumes factors are defined with respect to the origin (0,0,0)
    """
    assert(len(factors) == 3),\
        ("factors should have [Fx, Fy, Fz] scaling factors: Found length ",
         len(factors))
    # Diagonal scaling matrix, flattened into the 9 values gp_Mat expects
    scaling = gp_Mat(*np.diag(factors).flatten())
    try:
        offset = gp_XYZ(*vec)
    except NotImplementedError:
        # vec was supplied as a gp_Vec/gp_XYZ rather than a python sequence
        offset = gp_XYZ(vec.X(), vec.Y(), vec.Z())
    builder = BRepBuilderAPI_GTransform(brep, gp_GTrsf(scaling, offset), copy)
    builder.Build()
    return builder.Shape()
def FilletFaceCorners(face, radius):
    """Fillet every corner vertex of the input face.

    Parameters
    ----------
    face : TopoDS_Face
        the face whose corners will be filleted
    radius : scalar
        the fillet radius

    Returns
    -------
    face : TopoDS_Shape
        the face with filleted corners
    """
    from OCC.Core.BRepFilletAPI import BRepFilletAPI_MakeFillet2d
    fillet = BRepFilletAPI_MakeFillet2d(face)
    explorer = TopExp_Explorer(face, TopAbs_VERTEX)
    while explorer.More():
        fillet.AddFillet(topods_Vertex(explorer.Current()), radius)
        # Each corner vertex is visited twice by the explorer (once for each
        # adjoining edge), so advance twice to treat every corner only once
        explorer.Next()
        explorer.Next()
    fillet.Build()
    return fillet.Shape()
def ExtrudeFace(face, vec=gp_Vec(1, 0, 0)):
    """Linearly extrude a face through the given vector.

    Parameters
    ----------
    face : TopoDS_Face
    vec : OCC.gp.gp_Vec
        The offset vector to extrude through

    Returns
    -------
    shape : TopoDS_Shape
        The extruded shape

    Notes
    -----
    Uses BRepPrimAPI_MakePrism
    """
    from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakePrism
    prism = BRepPrimAPI_MakePrism(face, vec)
    prism.Build()
    return prism.Shape()
def SplitShapeFromProjection(shape, wire, direction, return_section=True):
    """Splits shape by the projection of wire onto its face

    Parameters
    ----------
    shape : TopoDS_Shape
        the brep to subtract from

    wire : TopoDS_Wire
        the tool to use for projection and splitting

    direction: OCC.gp.gp_Dir
        the direction to project the wire

    return_section : bool
        if True, also returns the projected wire used for splitting

    Returns
    -------
    newshape : TopoDS_Shape
        input shape face with wire subtracted

    wire : TopoDS_Wire
        the projected wire (returned only if return_section is True).
        NOTE(review): despite the parameter name, the *projected wire* is
        returned here, not the `section` sub-shape computed below (which is
        currently unused) — confirm which one callers expect.

    Notes
    -----
    Currently assumes splits the first face only
    """
    # get the face from the shape
    exp = TopExp_Explorer(shape, TopAbs_FACE)
    face = topods_Face(exp.Current())

    # Perform the projection of the tool wire onto the face
    proj = BRepProj_Projection(wire, face, direction)
    wire = proj.Current()

    splitter = BRepFeat_SplitShape(face)
    splitter.Add(wire, face)
    splitter.Build()

    section_list = splitter.DirectLeft()
    iterator = TopTools_ListIteratorOfListOfShape(section_list)
    section = iterator.Value()    # assume here that only 1 section is produced

    mod_list = splitter.Modified(face)
    iterator = TopTools_ListIteratorOfListOfShape(mod_list)
    newshape = iterator.Value()

    if return_section:
        return newshape, wire
    else:
        return newshape
def coslin(TransitionPoint, NCosPoints=24, NLinPoints=24):
    """Create a series of abscissas: cosine-spaced from 0 up to
    TransitionPoint (which maps to angle pi/2), then linearly spaced from
    TransitionPoint to 1. Suitable for airfoils defined by points.
    TransitionPoint must be in the range [0, 1].

    Parameters
    ----------
    TransitionPoint : scalar
        Point to transition from cosine to linear spacing, in range (0, 1)
    NCosPoints : int
        Number of cosine-spaced points between 0 and TransitionPoint
    NLinPoints : int
        Number of linearly spaced points between TransitionPoint and 1

    Returns
    -------
    Abscissa : numpy array
        The generated abscissa
    NCosPoints : int
        Number of cosine points used (same as input)
    """
    theta = np.linspace(0, np.pi / 2., NCosPoints)
    cosine_part = TransitionPoint * (1. - np.cos(theta))
    # drop the first linear point: it duplicates the last cosine point
    linear_part = np.linspace(TransitionPoint, 1., NLinPoints + 1)[1:]
    return np.hstack((cosine_part, linear_part)), NCosPoints
def export_STEPFile(shapes, filename):
    """Write the input shapes to a STEP (.stp) file.

    Parameters
    ----------
    shapes : list of TopoDS_Shape
        Shapes to write to file
    filename : string
        The output filename

    Returns
    -------
    status : IFSelect return code (asserted to be IFSelect_RetDone)
    """
    # initialize the STEP exporter
    writer = STEPControl_Writer()
    # Interface_Static_SetCVal("write.step.schema", "AP214") # Use default?

    # transfer shapes
    for shp in shapes:
        writer.Transfer(shp, STEPControl_AsIs)
    status = writer.Write(filename)
    assert(status == IFSelect_RetDone)
    return status
# def export_STLFile(AC_Shapes, filename):
# """Writes a component stl file for each shape in input AirCONICS shapes"""
# try:
# for shape in AC_Shapes:
# status = shape.WriteComponents(filename)
# except:
# # Assume error was raised as AC_Shapes contains only one shape
# status = shape.WriteComponents(filename)[0]
# return status
def export_STEPFile_Airconics(AirconicsShapes, filename):
    """ Writes a Step file with names defined in the AirconicsShapes. This
    function is not fully tested and should not yet be used.

    Parameters
    ----------
    AirconicsShapes : iterable of objects with a `Components` dict
        Each component shape is transferred to the STEP document
    filename : string
        The output filename

    Notes
    -----
    Work in progress: uses the XCAF document framework so that component
    names/colors/layers could be written into the STEP file.
    """
    print("This function is a work in progress. For now, use export_STEPFile")
    # create an handle to a document
    h_doc = TDocStd_Document()

    # Create the application
    app = XCAFApp_Application.GetApplication()
    app.NewDocument(TCollection_ExtendedString("MDTV-CAF"), h_doc)

    # Get root assembly
    doc = h_doc
    shape_tool = XCAFDoc_DocumentTool_ShapeTool(doc.Main())

    # Color/layer/material tools kept here for future use:
    # l_colors = XCAFDoc_DocumentTool_ColorTool(doc.Main())
    # l_layers = XCAFDoc_DocumentTool_LayerTool(doc.Main())
    # l_materials = XCAFDoc_DocumentTool_MaterialTool(doc.Main())

    step_writer = STEPCAFControl_Writer()
    step_writer.SetColorMode(True)
    step_writer.SetLayerMode(True)
    step_writer.SetNameMode(True)
    # step_writer.SetMaterialMode(True)

    for ACshape in AirconicsShapes:
        for comp in ACshape.Components:
            print("Writing {} to {}".format(comp, filename))
            # Register the component shape with the document shape tool
            lbl = shape_tool.AddShape(ACshape.Components[comp])
            # NOTE(review): `name` is built but never attached to the label
            # (the TDataStd_Name lines are commented out) — confirm intended
            name = TCollection_ExtendedString(comp)
            # tdn = TDataStd_Name()
            # tdn.Set(lbl, name)
            step_writer.Transfer(lbl, STEPControl_AsIs)

    status = step_writer.Write(filename)

    assert(status == IFSelect_RetDone)
    return status
# def import_STEPFile(shapes, filename):
# # TODO: initialize the STEP importer
# step_writer = STEPControl_Reader()
#
# status = step_reader.ReadFile(filename)
#
# assert(status == IFSelect_RetDone)
# return status
def AddSurfaceLoft(objs, continuity=GeomAbs_C2, check_compatibility=True,
                   solid=True, first_vertex=None, last_vertex=None,
                   max_degree=8, close_sections=True):
    """Create a lofted surface through curve objects

    Parameters
    ----------
    objs : list of python classes
        Each obj is expected to have an obj.Curve attribute :
        see airconics.primitives.airfoil class

    continuity : OCC.GeomAbs.GeomAbs_XX type (default C2)
        The order of continuity (C^0, C^1, C^2, G^0, ....)

    check_compatibility : bool (default=True)
        Adds a surface compatibility check to the builder

    solid : bool (default=True)
        Creates a solid object from the loft if True

    first_vertex : TopoDS_Vertex (optional, default=None)
        The starting vertex of the surface to add to the 'ThruSections'
        algorithm

    last_vertex : TopoDS_Vertex (optional, default=None)
        The end vertex of the surface to add to the 'ThruSections'
        algorithm

    max_degree : int (default=8)
        The order of the fitted NURBS surface

    close_sections : bool (default=True):
        Connects the start and end point of the loft rib curves if true. This
        has the same effect as adding an airfoil trailing edge.

    Returns
    -------
    shape : TopoDS_Shape
        The generated loft surface

    Notes
    -----
    Uses OCC.BRepOffsetAPI.BRepOffsetAPI_ThruSections. This function is
    ORDER DEPENDANT, i.e. add elements in the order through which they should
    be lofted
    """
    assert(len(objs) >= 2), 'Loft Failed: Less than two input curves'
    # Note: This is to give a smooth loft.
    ruled = False
    pres3d = 1e-6
    args = [solid, ruled, pres3d]    # args (in order) for ThruSections
    generator = BRepOffsetAPI_ThruSections(*args)
    generator.SetMaxDegree(max_degree)
    # from OCC.Core.GeomAbs import GeomAbs_G1
    generator.SetParType(Approx_ChordLength)
    if first_vertex:
        generator.AddVertex(first_vertex)
    for obj in objs:
        try:
            # Check if this is an airconics object with a GeomBspline handle
            # as its 'Curve' attribute
            obj = obj.Curve
            # edge = [make_edge(obj)]
        except:
            # Assume the object is already a geombspline handle
            pass
        edges = [make_edge(obj)]
        if close_sections:
            crv = obj
            if crv.IsClosed() is False:
                # Add Finite TE (trailing edge) segment between the curve's
                # parametric endpoints — assumes the curve is parameterized
                # on [0, 1] (TODO confirm for non-airfoil inputs)
                TE = make_edge(crv.Value(1), crv.Value(0))
                edges.append(TE)
        # Each section is added to the loft as a closed wire
        generator.AddWire(BRepBuilderAPI_MakeWire(*edges).Wire())
        # else:
        #     generator
    if last_vertex:
        generator.AddVertex(last_vertex)

    generator.CheckCompatibility(check_compatibility)
    generator.SetContinuity(continuity)
    generator.Build()
    with assert_isdone(generator, 'failed lofting'):
        return generator.Shape()
def Generate_InterpFunction(Values, EpsArray=None, uniform=True):
    """Generate a lookup interpolation function over spanwise coordinates.

    Given spanwise coordinates epsilon (EpsArray) and the corresponding
    Values (e.g. Chord, Sweep at each station), return a function
    f(epsilon) giving the linearly interpolated value.

    Parameters
    ----------
    Values : array of float
        Values of e.g. chordlength, sweep at each spanwise location
    EpsArray : array of float, optional
        Spanwise coordinates at which Values are known; defaults to a
        uniform distribution on [0, 1]
    uniform : bool
        Present for interface compatibility; not used by the current
        implementation (NOTE(review): confirm whether it should affect the
        default EpsArray)

    Returns
    -------
    f : function
        callable returning the interpolated value at a given epsilon
    """
    if EpsArray is None:
        EpsArray = np.linspace(0, 1, len(Values))

    def interpolator(Epsilon):
        return np.interp(Epsilon, EpsArray, Values)

    return interpolator
def translate_topods_from_vector(brep_or_iterable, vec, copy=False):
    '''
    Function Originally from pythonocc-utils, modified to work on objects

    Translates a brep (or every brep in an iterable) over a vector.

    Parameters
    ----------
    brep_or_iterable : TopoDS_Shape, or iterable of TopoDS_Shape
        the shape(s) to translate
    vec : gp_Vec
        the vector defining the translation
    copy : bool
        copies to brep if True

    Returns
    -------
    The translated shape, or a list of translated shapes when an iterable
    was supplied.
    '''
    trns = gp_Trsf()
    trns.SetTranslation(vec)
    if issubclass(brep_or_iterable.__class__, TopoDS_Shape):
        brep_trns = BRepBuilderAPI_Transform(brep_or_iterable, trns, copy)
        brep_trns.Build()
        return brep_trns.Shape()
    # BUG FIX: the original recursed with the *whole iterable* instead of
    # each element (`translate_topods_from_vector(brep_or_iterable, ...)`),
    # which caused unbounded recursion for any iterable input.
    return [translate_topods_from_vector(shp, vec, copy)
            for shp in brep_or_iterable]
def Uniform_Points_on_Curve(curve, NPoints):
    """Return a list of points sampled at uniform abscissa (arc-length)
    spacing along a curve.

    Parameters
    ----------
    curve : OCC.Geom curve type, TopoDS_Edge or TopoDS_Wire
    NPoints : int
        number of sampling points along the curve"""
    try:
        adaptor = GeomAdaptor_Curve(curve)
    except:
        # Allow the algorithm to deal with TopoDS_Edge and Wire shapes:
        adaptor = BRepAdaptor_Curve(curve)
    sampler = GCPnts_UniformAbscissa(adaptor, NPoints)
    points = []
    # OCC abscissa parameters are 1-indexed
    for i in range(1, NPoints + 1):
        points.append(adaptor.Value(sampler.Parameter(i)))
    return points
def rotate(brep, axe, degree, copy=False):
    """Rotate the brep about an axis.

    Originally from pythonocc-utils : might add dependency on this?

    Parameters
    ----------
    brep : shape to rotate
    axe : axis of rotation (gp_Ax1)
    degree : Number of degrees to rotate through
    copy : bool (default=False)

    Returns
    -------
    TopoDS_Shape
        The rotated shape
    """
    transform = gp_Trsf()
    # OCC expects radians; the public interface takes degrees
    transform.SetRotation(axe, np.radians(degree))
    builder = BRepBuilderAPI_Transform(brep, transform, copy)
    builder.Build()
    return builder.Shape()
def mirror(brep, plane='xz', axe2=None, copy=False):
    """Mirror an object (originally from pythonocc-utils).

    Parameters
    ----------
    brep : OCC.TopoDS.TopoDS_Shape
        The shape to mirror
    plane : string (default = 'xz')
        The name of the plane in which to mirror objects. Acceptable inputs
        are any of 'xy', 'yx', 'zy', 'yz', 'xz', 'zx'. Ignored if axe2 is
        defined.
    axe2 : OCC.gp.gp_Ax2
        The axes through which to mirror (overrides input 'plane')
    copy : bool

    Returns
    -------
    TopoDS_Shape
        The reflected shape

    Raises
    ------
    ValueError
        If `plane` is not a recognised plane string (and axe2 is None)

    Notes
    -----
    Pchambers: Added a functionality here to specify a plane using a string
    so that users could avoid interacting with core occ objects"""
    if axe2 is None:
        # Normal direction of each named mirror plane
        plane_normals = {'xz': (0, 1, 0), 'zx': (0, 1, 0),
                         'yz': (1, 0, 0), 'zy': (1, 0, 0),
                         'xy': (0, 0, 1), 'yx': (0, 0, 1)}
        try:
            normal = plane_normals[plane]
        except KeyError:
            # BUG FIX: original used `raise(ValueError, msg, plane)`, which
            # raises a tuple — a TypeError under Python 3, not a ValueError
            raise ValueError("Unknown mirror plane string: {}".format(plane))
        axe2 = gp_Ax2(gp_Pnt(0., 0., 0.), gp_Dir(*normal))
    trns = gp_Trsf()
    trns.SetMirror(axe2)
    brep_trns = BRepBuilderAPI_Transform(brep, trns, copy)
    return brep_trns.Shape()
# TODO: Curve fairing functions
# def batten_curve(pt1, pt2, height, slope, angle1, angle2):
# fc = FairCurve_MinimalVariation(pt1, pt2, height, slope)
# fc.SetConstraintOrder1(2)
# fc.SetConstraintOrder2(2)
# fc.SetAngle1(angle1)
# fc.SetAngle2(angle2)
# fc.SetHeight(height)
# fc.SetSlope(slope)
# fc.SetFreeSliding(True)
# print(fc.DumpToString())
# status = fc.Compute()
# print(error_code(status[0]), error_code(status[1]))
# return fc.Curve()
#
#
# def faircurve(event=None):
# pt1 = gp_Pnt2d(0., 0.)
# pt2 = gp_Pnt2d(0., 120.)
# height = 100.
# pl = Geom_Plane(gp_Pln())
# for i in range(0, 40):
# # TODO: the parameter slope needs to be visualized
# slope = i/100.
# bc = batten_curve(pt1, pt2, height, slope,
# math.radians(i), math.radians(-i))
# display.EraseAll()
# edge = BRepBuilderAPI_MakeEdge(bc, pl).Edge()
# display.DisplayShape(edge, update=True)
# time.sleep(0.21)
def make_pipe_shell(spine, profiles, support=None):
    """Sweep the given profile sections along a spine curve/wire.

    Parameters
    ----------
    spine : OCC curve, TopoDS_Edge or TopoDS_Wire
        The path to sweep along; converted to a wire if possible
    profiles : iterable of OCC curves or TopoDS_Wire
        The cross-sections to sweep; each is converted to a wire if needed
    support : optional
        Passed to BRepOffsetAPI_MakePipeShell.SetMode when given

    Returns
    -------
    TopoDS_Shape
        The swept pipe-shell shape
    """
    try:
        spine = make_wire(make_edge(spine))
    except:
        # spine is assumed to already be a TopoDS_Wire
        pass
    pipe = BRepOffsetAPI_MakePipeShell(spine)
    for profile in profiles:
        try:
            pipe.Add(profile)
        except:
            # profile was a raw curve: convert it to a wire first
            wire = make_wire(make_edge(profile))
            pipe.Add(wire)
    if support:
        pipe.SetMode(support)
    pipe.Build()
    return pipe.Shape()
def make_vertex(*args):
    """Build a TopoDS_Vertex from the given point/coordinate arguments."""
    return BRepBuilderAPI_MakeVertex(*args).Vertex()
def make_ellipsoid(centre_pt, dx, dy, dz):
    """Create an ellipsoid by non-uniformly scaling a diameter-1 sphere
    (by dx, dy, dz) and translating it to centre_pt."""
    unit_sphere = BRepPrimAPI_MakeSphere(gp_Pnt(0, 0, 0), 0.5)
    return transform_nonuniformal(unit_sphere.Shape(), [dx, dy, dz],
                                  vec=centre_pt)
def make_circle3pt(pt1, pt2, pt3):
    """Create a circle through three points; plain [x, y, z] sequences are
    converted to gp_Pnt automatically."""
    try:
        pt1 = gp_Pnt(*pt1)
        pt2 = gp_Pnt(*pt2)
        pt3 = gp_Pnt(*pt3)
    except:
        # inputs are assumed to already be gp_Pnt objects
        pass
    return GC_MakeCircle(pt1, pt2, pt3).Value()
def CalculateSurfaceArea(shape):
    """Calculate the surface area of the input shape via BRep global
    surface properties.

    Parameters
    ----------
    shape : TopoDS_Shape

    Returns
    -------
    Area : scalar
        Calculated surface area
    """
    from OCC.Core.BRepGProp import brepgprop_SurfaceProperties
    from OCC.Core.GProp import GProp_GProps
    props = GProp_GProps()
    brepgprop_SurfaceProperties(shape, props)
    # For surface properties, Mass() reports the total area
    return props.Mass()
def PlanarSurf(geomcurve):
    """Create a planar face bounded by a geom curve.

    Parameters
    ----------
    geomcurve : OCC.Geom type curve
        The edge of the profile

    Returns
    -------
    face : TopoDS_face
        the planar surface

    Notes
    -----
    The original try/except executed a byte-identical statement in both
    branches (a leftover handle-vs-object shim), so a single call is
    equivalent: any exception propagates exactly as before.
    """
    wire = make_wire(make_edge(geomcurve))
    return make_face(wire)
def project_curve_to_plane(curve, plane, direction):
    """
    Cylindrically project a curve onto a plane along a direction.

    Parameters
    ----------
    curve : Geom_Curve
    plane : Geom_Plane
    direction : gp_Dir
        The cylindrical projection direction

    Returns
    -------
    Hproj_curve : Geom_Curve
        the projected curve
    """
    from OCC.Core.GeomProjLib import geomprojlib_ProjectOnPlane
    curve_handle = coerce_handle(curve)
    plane_handle = coerce_handle(plane)
    # final True flag: keep parameterization of the projected curve
    return geomprojlib_ProjectOnPlane(curve_handle, plane_handle,
                                      direction, True)
def project_curve_to_surface(curve, surface, dir):
    '''
    Return the curve produced by cylindrically projecting *curve* onto the
    surface shape along *dir*.

    Parameters
    ----------
    curve : Geom_curve or TopoDS_Edge/Wire
    surface : TopoDS_Shape
    dir : gp_Dir
        the direction of projection

    Returns
    -------
    res_curve : geom_curve (bspline only?)
    '''
    from OCC.Core.BRepProj import BRepProj_Projection
    from OCC.Core.BRepAdaptor import BRepAdaptor_CompCurve
    try:
        edge = make_edge(curve)
    except:
        # conversion failed: curve should already be an edge (or wire)
        edge = curve
    # make_wire returns its input unchanged when given a wire already
    wire = make_wire(edge)
    projection = BRepProj_Projection(wire, surface, dir)
    return BRepAdaptor_CompCurve(projection.Current()).BSpline()
def points_from_intersection(plane, curve):
    '''
    Find intersection points between plane and curve.

    Parameters
    ----------
    plane : Geom_Plane
        The Plane

    curve : Geom_*Curve
        The Curve

    Returns
    -------
    P : Point or list of points
        A single intersection point (OCC.gp.gp_Pnt) if one intersection is
        found, or list of points if more than one is found.
            - If No Intersection points were found, returns None

    Notes
    -----
    The plane is first converted to a surface As the GeomAPI_IntCS class
    requires this.
    '''
    intersector = GeomAPI_IntCS(curve, plane)

    with assert_isdone(intersector, 'failed to calculate intersection'):
        nb_results = intersector.NbPoints()
        if nb_results == 1:
            return intersector.Point(1)
        elif nb_results > 1:
            # BUG FIX: the original built this list but never returned it,
            # so multi-point intersections silently yielded None
            return [intersector.Point(i) for i in range(1, nb_results + 1)]
        else:
            return None
# TODO: Network surface function needs fixing
# def Add_Network_Surface(curvenet, deg=3, initsurf=None):
# '''Adds a surface from curve network using the OCC plate surface algorithm
# This function is in development and currently raises an error
# Parameters
# ----------
# curvenet : list of GeomCurve
# Notes
# -----
# '''
# raise NotImplementedError('This function is not yet safe for general use')
# # fill = BRepFill_Filling(deg)
# # for curve in curvenet:
# # try:
# # fill.Add(make_edge(curve), continuity)
# # except TypeError:
# # # If curve is given as object rather than handle
# # fill.Add(make_edge(curve), continuity)
# # fill.Build()
# # face = fill.Face()
# # return face
# print("This function is not tested and should not be used with certainty")
# builder = GeomPlate_BuildPlateSurface(deg, 15, 5)
# if initsurf is not None:
# "Loading Initial Surface"
# builder.LoadInitSurface()
# "Initial Surface loaded"
# for curve in curvenet:
# print(type(curve))
# adaptor = GeomAdaptor_Curve(curve)
# Hadapter = GeomAdaptor_HCurve(adaptor)
# constr = GeomPlate_CurveConstraint(Hadapter, 0)
# builder.Add(constr)
# # first didnt work... attempt 2 :
# # edge = make_edge(curve)
# # C = BRepAdaptor_HCurve()
# # C.ChangeCurve().Initialize(edge)
# # Cont = BRepFill_CurveConstraint(C, 0)
# # builder.Add(Cont)
# #
# # Try adding from wires instead.. attempt 3:
# # exp =
# builder.Perform()
# with assert_isdone(builder, 'Failed to create Plate Surface'):
# # Approximate the surface into a bspline surface
# surf = builder.Surface()
# approx = GeomPlate_MakeApprox(surf, 0.001, 10, 8, 0.001, 0).Surface()
# Umin, Umax, Vmin, Vmax = surf.Bounds()
# print(Umin, Umax, Vmin, Vmax)
# print("about to make face:")
# face = make_face(approx, 0.1) # Umin, Umax, Vmin, Vmax, 0.1)
# print("Face made")
# return face
def CutSect(Shape, SpanStation):
"""
Parameters
----------
Shape : TopoDS_Shape
The Shape to find planar cut section (parallel to xz plane)
SpanStation : scalar in range (0, 1)
y-direction location at which to cut Shape
Returns
-------
Section : result of OCC.BRepAlgoAPI.BRepAlgoAPI_Section (TopoDS_Shape)
The cut section of shape given a cut plane parallel to xz at input
Spanstation.
Chord : result of OCC.GC.GC_MakeSegment.Value (Geom_TrimmedCurve)
The Chord line between x direction extremeties
"""
(Xmin, Ymin, Zmin, Xmax, Ymax, Zmax) = ObjectsExtents([Shape])
YStation = Ymin + (Ymax - Ymin) * SpanStation
OriginX = Xmin - 1
OriginZ = Zmin - 1
P = gp_Pln(gp_Pnt(OriginX, YStation, OriginZ), gp_Dir(gp_Vec(0, 1, 0)))
# Note: using 2*extents here as previous +1 trimmed plane too short
CutPlaneSrf = make_face(P, 0, Zmax + 2, 0, Xmax + 2)
I = BRepAlgoAPI_Section(Shape, CutPlaneSrf)
I.ComputePCurveOn1(True)
I.Approximation(True)
I.Build()
Section = I.Shape()
(Xmin, Ymin, Zmin, Xmax, Ymax, Zmax) = ObjectsExtents([Section])
# Currently assume only one edge exists in the intersection:
exp = TopExp_Explorer(Section, TopAbs_EDGE)
edge = topods_Edge(exp.Current())
# Find the apparent chord of the section (that is, the line connecting the
# fore most and aftmost points on the curve
DivPoints = Uniform_Points_on_Curve(edge, 200)
Xs = np.array([pt.X() for pt in DivPoints])
min_idx = | np.argmin(Xs) | numpy.argmin |
# Utility functions for the course Robot Modelling
# <NAME> (<EMAIL>), sept. 2016
#
# Additional functions added for more functionality
# <NAME> (<EMAIL>), sept. 2018
# <NAME> (<EMAIL>), sept. 2018
###############################################################################
import numpy as np
from numpy import cos, sin
# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
    """
    Check whether R is (numerically) an orthonormal 3x3 matrix, i.e.
    R^T R equals the identity to within 1e-6. Note this does not test the
    determinant, so reflections also pass.

    :param R: 3x3 numpy array
    :return: True when ||I - R^T R|| < 1e-6
    """
    product = np.dot(np.transpose(R.copy()), R)
    identity = np.identity(3, dtype=R.dtype)
    return np.linalg.norm(identity - product) < 1e-6
def inverse_kinematics_wrist(R):
    """
    Calculate candidate wrist joint angles (t4, t5, t6) from a 3x3 wrist
    rotation matrix, taking the positive branch (minplus = 1) of
    sqrt(1 - R[2,2]^2).

    :param R: 3x3 rotation matrix of the wrist (indexable as R[i, j])
    :return: (np.array([t4, t5, t6]), R_check) where R_check holds the
             elementwise residuals of the reconstructed rotation matrix
             minus R; values near zero indicate a consistent solution
    """
    # Branch selector for the two analytic wrist solutions; only the
    # positive branch is evaluated here
    minplus = 1
    t5 = np.arctan2(minplus * np.sqrt(1 - (R[2, 2]**2)), R[2, 2])
    t4 = np.arctan2(minplus * R[1, 2], minplus * R[0, 2])
    t6 = np.arctan2(minplus * R[2, 1], minplus * -R[2, 0])
    # Rebuild the rotation matrix from (t4, t5, t6) and subtract R so the
    # caller can verify the solution (residuals should be ~0)
    R_check = np.array([[cos(t4) * cos(t5) * cos(t6) - sin(t4) * sin(t6) - R[0, 0], -cos(t4) * cos(t5) * sin(t6) - sin(t4) * cos(t6) - R[0, 1], cos(t4) * sin(t5) - R[0, 2]],
                        [sin(t4) * cos(t5) * cos(t6) + cos(t4) * sin(t6) - R[1, 0], -sin(t4) * cos(t5) * sin(t6) + cos(t4) * cos(t6) - R[1, 1], sin(t4) * sin(t5) - R[1, 2]],
                        [-sin(t5) * cos(t6) - R[2, 0], sin(t5) * sin(t6) - R[2, 1], cos(t5) - R[2, 2]]])
    return np.array([t4, t5, t6]), R_check
def make_rotation_matrix(axis, angle):
    """Make a 3x3 rotation matrix for a rotation about a principal axis.

    :param axis: string that specifies over which axis will be rotated
                 ("x", "y" or "z")
    :param angle: rotation angle in radians
    :return: 3x3 numpy rotation matrix
    :raises ValueError: for an unknown axis (previously the function fell
        through and silently returned None, which broke callers later).
    """
    c, s = np.cos(angle), np.sin(angle)
    if axis == "x":
        return np.array([[1, 0, 0],
                         [0, c, -s],
                         [0, s, c]])
    elif axis == "y":
        # NOTE(review): this is the transpose of the conventional
        # right-handed Ry(angle); kept unchanged to preserve existing
        # behaviour — confirm the intended sign convention with callers.
        return np.array([[c, 0, -s],
                         [0, 1, 0],
                         [s, 0, c]])
    elif axis == "z":
        return np.array([[c, -s, 0],
                         [s, c, 0],
                         [0, 0, 1]])
    raise ValueError("axis must be one of 'x', 'y' or 'z', got %r" % (axis,))
def make_DH_matrix(DH_parameters):
    """Make a homogeneous matrix based on the Denavit-Hartenberg convention.

    :param DH_parameters: sequence of 4 DH parameters
                          (length, twist, offset, angle)
    :return: 4x4 numpy DH transformation matrix
    """
    length = DH_parameters[0]
    twist = DH_parameters[1]
    offset = DH_parameters[2]
    angle = DH_parameters[3]
    ca, sa = np.cos(angle), np.sin(angle)
    ct, st = np.cos(twist), np.sin(twist)
    return np.array([
        [ca, -sa * ct, sa * st, length * ca],
        [sa, ca * ct, -ca * st, length * sa],
        [0, st, ct, offset],
        [0, 0, 0, 1],
    ])
def interpolate(values, precision):
    """Create position values along the given trajectory.

    For each pair of consecutive waypoints, ``precision`` evenly spaced
    points are generated starting at the first waypoint of the pair; the
    very last waypoint itself is not appended.
    :param values: sequence of positions (scalars or arrays)
    :param precision: amount of subvalues per segment
    :return: numpy array of interpolated positions
    """
    steps = []
    for start, stop in zip(values[:-1], values[1:]):
        increment = np.true_divide(np.subtract(stop, start), precision)
        for k in range(precision):
            steps.append(np.add(start, np.multiply(increment, k)))
    return np.array(steps)
def make_homogenious_matrix(rotation, translation):
    """Combine a 3x3 rotation and 3x1 translation into a 4x4 homogeneous matrix.

    (Name keeps the historical spelling for backward compatibility.)
    """
    upper = np.hstack((rotation, translation))
    bottom = np.array([0, 0, 0, 1])
    return np.vstack((upper, bottom))
# function for the inverse kinematics of a 3DOF robot
def inverse_algorithm_3DOF(arms, points, elbow_down=False):
    """Inverse kinematics of a 3DOF robot arm.

    Inputs:
        arms: 3-element array/list with arm lengths (d1, d2, d3)
        points: 3-element array with (x,y,z) coordinate of end point
        elbow_down (optional): True/False boolean to determine
                               which solution needs to be returned
    Output:
        (angles, unreachable): angles is a 4-element list with the three
        joint angles in radians(!) plus a trailing 0; unreachable is True
        when the point is outside the workspace (angles are then all zero).
    """
    x, y, z = points[0], points[1], points[2]
    d1, d2, d3 = arms[0], arms[1], arms[2]

    s = z - d1                      # height above the shoulder joint
    r = np.sqrt(x ** 2 + y ** 2)    # horizontal reach
    c = np.sqrt(r ** 2 + s ** 2)    # shoulder-to-wrist distance

    beta = np.arctan2(s, r)
    # Law of cosines. np.clip (instead of the old np.minimum(1, ...)) also
    # clamps the lower bound, so round-off slightly below -1 yields pi
    # instead of NaN from arccos.
    alpha = np.arccos(np.clip((-d3 ** 2 + d2 ** 2 + c ** 2) / (2 * d2 * c), -1.0, 1.0))
    theta1 = np.arctan2(y, x)

    upper_cos = -c ** 2 + d3 ** 2 + d2 ** 2
    lower_cos = 2 * d3 * d2
    if abs(upper_cos) > abs(lower_cos):
        # |cos(theta3)| would exceed 1: target outside the workspace.
        return [0, 0, 0], True

    inner = np.arccos(np.clip(upper_cos / lower_cos, -1.0, 1.0))
    if elbow_down:
        theta2 = beta - alpha
        theta3 = np.radians(180) - inner
    else:
        theta2 = beta + alpha
        theta3 = -(np.radians(180) - inner)
    return [theta1, theta2, theta3, 0], False
def kin_planar_forward(arms, angles):
    """Forward kinematics of a 2-link planar robot.

    Inputs:
        arms: 2-element array/list with arm lengths
        angles: 2-element array/list with angles in radians(!)
    Output:
        2-element numpy array with the (x, y) coordinate of the end point
    """
    a1, a2 = arms[0], arms[1]
    q1, q2 = angles[0], angles[1]
    elbow = np.array([a1 * np.cos(q1), a1 * np.sin(q1)])
    tip = elbow + np.array([a2 * np.cos(q1 + q2), a2 * np.sin(q1 + q2)])
    return tip
def kin_planar_inverse(arms, points, elbow_down=True):
    """Inverse kinematics of a 2-link planar robot.

    Inputs:
        arms: 2-element array/list with arm lengths
        points: 2-element array with (x, y) coordinate of end point
        elbow_down (optional): True/False boolean to determine
                               which solution needs to be returned
    Output:
        2-element numpy array with the joint angles in radians(!)
    """
    x, y = points[0], points[1]
    a1, a2 = arms[0], arms[1]
    # Law of cosines for the elbow angle; sin sign picks the configuration.
    cos_q2 = (x ** 2 + y ** 2 - a1 ** 2 - a2 ** 2) / (2 * a1 * a2)
    sin_q2 = np.sqrt(1 - (cos_q2 ** 2))
    if not elbow_down:
        sin_q2 = -sin_q2
    theta2 = np.arctan2(sin_q2, cos_q2)
    theta1 = np.arctan2(y, x) - np.arctan2(a2 * np.sin(theta2), a1 + a2 * np.cos(theta2))
    return np.array([theta1, theta2])
def sphere():
    """Create a default GLMeshItem sphere for pyqtgraph/OpenGL visualisation."""
    import pyqtgraph.opengl as gl

    mesh = gl.MeshData.sphere(rows=8, cols=16)
    return gl.GLMeshItem(
        meshdata=mesh,
        smooth=False,
        drawFaces=True,
        faceColor=(0.2, 0.3, 0.4, 1),
        drawEdges=False,
        edgeColor=(0.2, 0.3, 0.4, 1),
    )
# cylinder is a convenience function to create a cylinder shape in
# pyqtgraph/OpenGL, it gives you a number of vertices distributed over the
# surface of the cylinder and triangular shaped faces that cover the whole
# surface of the cylinder
# cylinders are being used to visualize joints
def cylinder(radius, height, N):
    """Calculates vertices and faces for a cylinder for visualisation in
    pyqtgraph/OpenGL.
    Inputs:
        radius: radius of the cylinder
        height: height of the cylinder
        N: number of segments to approximate the circular shape of the cylinder
    Outputs:
        vertices: array with on each row the (x,y,z) coordinates of the vertices
        faces: array with triangular faces of the cylinder
    Note:
        The cylinder is a circle in the x,y plane with center at (0,0) that is
        extruded along the z-axis.
    """
    import scipy.spatial
    # N angle samples around the circle, as an (N, 1) column for hstack below.
    t = np.linspace(0, 2 * np.pi, N, endpoint=False).reshape(N, 1)
    # Vertex layout: rows 0..N-1 are the bottom ring (z=0),
    # rows N..2N-1 the top ring (z=height), same angular order.
    vertices = np.zeros((2 * N, 3))
    vertices[0:N, :] = np.hstack((radius * np.cos(t), radius * np.sin(t), np.zeros((N, 1))))
    vertices[N:2 * N, :] = vertices[0:N, :] + np.hstack((np.zeros((N, 2)), height * np.ones((N, 1))))
    # Face budget: (N-2) bottom cap + 2N side triangles + (N-2) top cap.
    faces = np.zeros((N - 2 + 2 * N + N - 2, 3), dtype=np.uint)
    # bottom, makes use of Delaunay triangulation contained in Scipy's
    # submodule spatial (which on its turn makes use of the Qhull library)
    # (simplices are reversed so the cap faces outward/downward)
    faces[0:N - 2, :] = scipy.spatial.Delaunay(vertices[0:N, 0:2], furthest_site=True, qhull_options='QJ').simplices[:,
                        -1::-1]
    # sides: two triangles per segment, quad (i, i+1, N+i+1, N+i)
    for i in range(N - 1):
        faces[N - 2 + 2 * i, :] = np.array([i, i + 1, N + i + 1], dtype=np.uint)
        faces[N - 2 + 2 * i + 1, :] = np.array([i, N + i + 1, N + i], dtype=np.uint)
    # final one between the last and the first:
    faces[N - 2 + 2 * (N - 1), :] = np.array([N - 1, 0, N], dtype=np.uint)
    faces[N - 2 + 2 * (N - 1) + 1, :] = np.array([N - 1, N, 2 * N - 1], dtype=np.uint)
    # top: reuse the bottom triangulation shifted by N, winding flipped
    # so the cap faces upward.
    faces[N - 2 + 2 * N:N - 2 + 2 * N + N - 2, :] = N + faces[0:N - 2, -1::-1]
    return vertices, faces
# similar to the cylinder, but now for creating a box-shaped object
# boxes are used to visualize links
def box(size=(1, 1, 1)):
    """Calculates vertices and faces for a box for visualisation in
    pyqtgraph/OpenGL.
    Inputs:
        size: 3 element array/list with the width,depth,height, i.e.
              the dimensions along the x, y and z-axis.
    Outputs:
        vertices: array with on each row the (x,y,z) coordinates of the vertices
        faces: array with triangular faces of the box
    Note:
        The box is between (0,0,0) and (size[0],size[1],size[2]), note that
        negative sizes are not prevented but result in strange artifacts because
        it changes the orientation of the faces of the box (inside becomes
        outside).
    """
    xdim = size[0]
    ydim = size[1]
    zdim = size[2]
    # Bottom four corners (z=0) followed by the top four (z=zdim),
    # in the same winding order. Built in one literal; the previous
    # element-by-element fill (and a dead `faces = np.zeros(...)` that was
    # immediately overwritten) are gone.
    vertices = np.array([
        [0, ydim, 0],
        [xdim, ydim, 0],
        [xdim, 0, 0],
        [0, 0, 0],
        [0, ydim, zdim],
        [xdim, ydim, zdim],
        [xdim, 0, zdim],
        [0, 0, zdim],
    ], dtype=float)
    faces = np.array([
        # bottom (clockwise, while looking from top)
        [2, 1, 0],
        [3, 2, 0],
        # sides (counter-clock-wise)
        [0, 1, 5],
        [0, 5, 4],
        [1, 2, 6],
        [1, 6, 5],
        [2, 3, 7],
        [2, 7, 6],
        [3, 0, 4],
        [3, 4, 7],
        # top (counter-clockwise)
        [4, 5, 6],
        [4, 6, 7]
    ], dtype=np.uint)
    return vertices, faces
def rotate_xyz(angles):
"""
Calculates the rotations matrix for xyz angles
(x,y,z)
:param angles:
:return:
"""
x, y, z = angles
rotate_x = np.array([[1, 0, 0],
[0, np.cos(x), np.sin(x)],
[0, -np.sin(x), np.cos(x)]])
rotate_y = np.array([[np.cos(y), 0, -np.sin(y)],
[0, 1, 0],
[ | np.sin(y) | numpy.sin |
# -*- coding: utf-8 -*-
"""
Copyright () 2018
All rights reserved
FILE: cifar10_resnet.py
AUTHOR: tianyuningmou
DATE CREATED: @Time : 2018/5/22 下午2:26
DESCRIPTION: .
VERSION: : #1
CHANGED By: : tianyuningmou
CHANGE: :
MODIFIED: : @Time : 2018/5/22 下午2:26
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras.models import Model
from keras.datasets import cifar10
import numpy as np
import os
batch_size = 32
epochs = 200
data_augmentation = True
num_classes = 10
subtract_pixel_mean = True
n = 3
version = 1
if version == 1:
depth = n * 6 + 2
elif version == 2:
depth = n * 9 + 2
model_type = 'ResNet{depth}v{version}'.format(depth=depth, version=version)
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
input_shape = x_train.shape[1:]
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
if subtract_pixel_mean:
x_train_mean = | np.mean(x_train, axis=0) | numpy.mean |
"""Preprocessing data methods."""
import random
import numpy as np
import pandas as pd
from autots.tools.impute import FillNA, df_interpolate
from autots.tools.seasonal import date_part, seasonal_int
class EmptyTransformer(object):
    """Base transformer returning raw data.

    Serves as the no-op baseline and the common interface
    (fit / transform / inverse_transform / fit_transform) for all
    transformers in this module.
    """

    def __init__(self, name: str = 'EmptyTransformer', **kwargs):
        self.name = name

    def _fit(self, df):
        """Learn behavior of data to change.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        return df

    def fit(self, df):
        """Learn behavior of data to change.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        self._fit(df)
        return self

    def transform(self, df):
        """Return changed data.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        return df

    def inverse_transform(self, df, trans_method: str = "forecast"):
        """Return data to original *or* forecast form.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        return df

    def fit_transform(self, df):
        """Fits and Returns *Magical* DataFrame.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        return self._fit(df)

    def __repr__(self):
        """Print."""
        return f'Transformer {self.name}, uses standard .fit/.transform'

    @staticmethod
    def get_new_params(method: str = 'random'):
        """Generate new random parameters"""
        if method != 'test':
            return {}
        return {'test': random.choice([1, 2])}
def remove_outliers(df, std_threshold: float = 3):
    """Replace outliers with np.nan.
    https://stackoverflow.com/questions/23199796/detect-and-exclude-outliers-in-pandas-data-frame
    Args:
        df (pandas.DataFrame): DataFrame containing numeric data, DatetimeIndex
        std_threshold (float): The number of standard deviations away from mean to count as outlier.
    """
    deviation = np.abs(df - df.mean())
    cutoff = std_threshold * df.std()
    # Boolean-mask indexing leaves NaN where the condition fails.
    return df[deviation <= cutoff]
def clip_outliers(df, std_threshold: float = 3):
    """Replace outliers above threshold with that threshold. Axis = 0.
    Args:
        df (pandas.DataFrame): DataFrame containing numeric data
        std_threshold (float): The number of standard deviations away from mean to count as outlier.
    """
    col_mean = df.mean(axis=0, skipna=True)
    band = df.std(axis=0, skipna=True) * std_threshold
    # Clamp each column to mean +/- std_threshold standard deviations.
    return df.clip(lower=col_mean - band, upper=col_mean + band, axis=1)
def simple_context_slicer(df, method: str = 'None', forecast_length: int = 30):
    """Condensed version of context_slicer with more limited options.
    Args:
        df (pandas.DataFrame): training data frame to slice
        method (str): Option to slice dataframe
            'None' - return unaltered dataframe
            'HalfMax' - return half of dataframe
            'ForecastLength' - return dataframe equal to length of forecast
            '2ForecastLength' - return dataframe equal to twice length of forecast
                (also takes 4, 6, 8, 10 in addition to 2)
            'n' - any integer length to slice by
            '-n' - full length less this amount
            "0.n" - this percent of the full data
    """
    if method in [None, "None"]:
        return df
    df = df.sort_index(ascending=True)
    method_str = str(method)
    if 'forecastlength' in method_str.lower():
        # NOTE: only the FIRST digit in the string is used, matching the
        # documented 2/4/6/8/10 variants.
        multiplier = int([ch for ch in method_str if ch.isdigit()][0])
        return df.tail(multiplier * forecast_length)
    if method == 'HalfMax':
        return df.tail(int(len(df.index) / 2))
    if method_str.replace("-", "").replace(".", "").isdigit():
        amount = float(method)
        if amount >= 1:
            return df.tail(int(amount))
        if amount > -1:
            # fraction of the full data
            return df.tail(int(df.shape[0] * abs(amount)))
        # negative count: full length less this amount
        return df.tail(int(df.shape[0] + amount))
    print("Context Slicer Method not recognized")
    return df
class Detrend(EmptyTransformer):
    """Remove a linear trend from the data.

    A per-column trend over the (integer-coded) datetime index is fitted
    with the chosen regression model, subtracted in ``transform`` and added
    back in ``inverse_transform``.
    """
    def __init__(self, model: str = 'GLS', **kwargs):
        super().__init__(name='Detrend')
        self.model = model
        # GLM families that require strictly positive targets; data is
        # shifted positive via PositiveShift before fitting these.
        self.need_positive = ['Poisson', 'Gamma', 'Tweedie']
    @staticmethod
    def get_new_params(method: str = 'random'):
        """Generate new random parameters (weighted choice of trend model)."""
        if method == "fast":
            choice = random.choices(
                [
                    "GLS",
                    "Linear",
                ],
                [
                    0.5,
                    0.5,
                ],
                k=1,
            )[0]
        else:
            choice = random.choices(
                [
                    "GLS",
                    "Linear",
                    "Poisson",
                    "Tweedie",
                    "Gamma",
                    "TheilSen",
                    "RANSAC",
                    "ARD",
                ],
                [0.24, 0.2, 0.1, 0.1, 0.1, 0.02, 0.02, 0.02],
                k=1,
            )[0]
        return {
            "model": choice,
        }
    def _retrieve_detrend(self, detrend: str = "Linear"):
        """Return an (unfitted) sklearn regressor for the requested model.

        Single-output regressors are wrapped in MultiOutputRegressor so one
        object fits all columns; unknown names fall back to LinearRegression.
        Imports are local so optional sklearn models are only loaded on use.
        """
        if detrend == 'Linear':
            from sklearn.linear_model import LinearRegression
            return LinearRegression(fit_intercept=True)
        elif detrend == "Poisson":
            from sklearn.linear_model import PoissonRegressor
            from sklearn.multioutput import MultiOutputRegressor
            return MultiOutputRegressor(
                PoissonRegressor(fit_intercept=True, max_iter=200)
            )
        elif detrend == 'Tweedie':
            from sklearn.linear_model import TweedieRegressor
            from sklearn.multioutput import MultiOutputRegressor
            return MultiOutputRegressor(TweedieRegressor(power=1.5, max_iter=200))
        elif detrend == 'Gamma':
            from sklearn.linear_model import GammaRegressor
            from sklearn.multioutput import MultiOutputRegressor
            return MultiOutputRegressor(
                GammaRegressor(fit_intercept=True, max_iter=200)
            )
        elif detrend == 'TheilSen':
            from sklearn.linear_model import TheilSenRegressor
            from sklearn.multioutput import MultiOutputRegressor
            return MultiOutputRegressor(TheilSenRegressor())
        elif detrend == 'RANSAC':
            from sklearn.linear_model import RANSACRegressor
            return RANSACRegressor()
        elif detrend == 'ARD':
            from sklearn.linear_model import ARDRegression
            from sklearn.multioutput import MultiOutputRegressor
            return MultiOutputRegressor(ARDRegression())
        else:
            from sklearn.linear_model import LinearRegression
            return LinearRegression()
    def fit(self, df):
        """Fits trend for later detrending.
        Args:
            df (pandas.DataFrame): input dataframe
        Raises:
            ValueError: if the data cannot be coerced to float.
        """
        try:
            df = df.astype(float)
        except Exception:
            raise ValueError("Data Cannot Be Converted to Numeric Float")
        Y = df.values
        # Regress each series on the integer-coded datetime index.
        X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
        if self.model == 'GLS':
            from statsmodels.regression.linear_model import GLS
            # statsmodels takes endog (Y) first and accepts 1-D exog.
            self.trained_model = GLS(Y, X, missing='drop').fit()
        else:
            self.trained_model = self._retrieve_detrend(detrend=self.model)
            if self.model in self.need_positive:
                # Shift data positive for GLMs that cannot handle values <= 0.
                self.trnd_trans = PositiveShift(
                    log=False, center_one=True, squared=False
                )
                Y = pd.DataFrame(self.trnd_trans.fit_transform(df)).values
            X = X.reshape((-1, 1))  # sklearn requires 2-D X
            self.trained_model.fit(X, Y)
        self.shape = df.shape
        return self
    def fit_transform(self, df):
        """Fit and Return Detrended DataFrame.
        Args:
            df (pandas.DataFrame): input dataframe
        """
        self.fit(df)
        return self.transform(df)
    def transform(self, df):
        """Return detrended data.
        Args:
            df (pandas.DataFrame): input dataframe
        """
        try:
            df = df.astype(float)
        except Exception:
            raise ValueError("Data Cannot Be Converted to Numeric Float")
        X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
        if self.model != "GLS":
            X = X.reshape((-1, 1))
        # df = df.astype(float) - self.model.predict(X)
        if self.model in self.need_positive:
            # Prediction is in shifted space; un-shift before subtracting.
            temp = pd.DataFrame(
                self.trained_model.predict(X), index=df.index, columns=df.columns
            )
            temp = self.trnd_trans.inverse_transform(temp)
            df = df - temp
        else:
            df = df - self.trained_model.predict(X)
        return df
    def inverse_transform(self, df):
        """Return data to original form.
        Args:
            df (pandas.DataFrame): input dataframe
        """
        try:
            df = df.astype(float)
        except Exception:
            raise ValueError("Data Cannot Be Converted to Numeric Float")
        X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
        if self.model != "GLS":
            X = X.reshape((-1, 1))
        if self.model in self.need_positive:
            temp = pd.DataFrame(
                self.trained_model.predict(X), index=df.index, columns=df.columns
            )
            df = df + self.trnd_trans.inverse_transform(temp)
        else:
            df = df + self.trained_model.predict(X)
        # df = df.astype(float) + self.trained_model.predict(X)
        return df
class StatsmodelsFilter(EmptyTransformer):
    """Irreversible filters.

    Removes an estimated cyclical component from each series using
    statsmodels band-pass filters. ``inverse_transform`` is the identity,
    hence "irreversible".

    Args:
        method (str): bkfilter or cffilter
    """
    def __init__(self, method: str = 'bkfilter', **kwargs):
        super().__init__(name="StatsmodelsFilter")
        self.method = method
    def fit(self, df):
        """Fits filter.

        No state is learned; the filters are applied directly in transform.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        return self
    def fit_transform(self, df):
        """Fit and Return Detrended DataFrame.
        Args:
            df (pandas.DataFrame): input dataframe
        """
        self.fit(df)
        return self.transform(df)
    def transform(self, df):
        """Return detrended data.
        Args:
            df (pandas.DataFrame): input dataframe
        Raises:
            ValueError: if the data cannot be coerced to float.
        """
        try:
            df = df.astype(float)
        except Exception:
            raise ValueError("Data Cannot Be Converted to Numeric Float")
        if self.method == 'bkfilter':
            from statsmodels.tsa.filters import bk_filter
            # Baxter-King band-pass; K=1 trims one observation at each end,
            # so the resulting NaN edges are filled forward/backward.
            cycles = bk_filter.bkfilter(df, K=1)
            cycles.columns = df.columns
            df = (df - cycles).fillna(method='ffill').fillna(method='bfill')
        elif self.method == 'cffilter':
            from statsmodels.tsa.filters import cf_filter
            # Christiano-Fitzgerald filter; only the cycle is removed,
            # the trend component is discarded.
            cycle, trend = cf_filter.cffilter(df)
            cycle.columns = df.columns
            df = df - cycle
        return df
    def inverse_transform(self, df):
        """Return data to original form.

        The filtered component cannot be restored; returns ``df`` unchanged.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        return df
class SinTrend(EmptyTransformer):
    """Modelling sin.

    Fits one sinusoid per column over the (integer-coded) datetime index,
    subtracts it in ``transform`` and adds it back in ``inverse_transform``.
    """
    def __init__(self, **kwargs):
        super().__init__(name="SinTrend")
    def fit_sin(self, tt, yy):
        """Fit sin to the input time sequence, and return fitting parameters "amp", "omega", "phase", "offset", "freq", "period" and "fitfunc"
        from user unsym @ https://stackoverflow.com/questions/16716302/how-do-i-fit-a-sine-curve-to-my-data-with-pylab-and-numpy
        """
        import scipy.optimize
        tt = np.array(tt)
        yy = np.array(yy)
        # Initial guesses from the FFT: dominant nonzero frequency, and
        # amplitude/offset from std/mean of the signal.
        ff = np.fft.fftfreq(len(tt), (tt[1] - tt[0]))  # assume uniform spacing
        Fyy = abs(np.fft.fft(yy))
        guess_freq = abs(
            ff[np.argmax(Fyy[1:]) + 1]
        )  # excluding the zero frequency "peak", which is related to offset
        guess_amp = np.std(yy) * 2.0 ** 0.5
        guess_offset = np.mean(yy)
        guess = np.array([guess_amp, 2.0 * np.pi * guess_freq, 0.0, guess_offset])
        def sinfunc(t, A, w, p, c):
            # Model: A * sin(w*t + p) + c
            return A * np.sin(w * t + p) + c
        popt, pcov = scipy.optimize.curve_fit(sinfunc, tt, yy, p0=guess, maxfev=10000)
        A, w, p, c = popt
        # f = w/(2.*np.pi)
        # fitfunc = lambda t: A * np.sin(w*t + p) + c
        return {
            "amp": A,
            "omega": w,
            "phase": p,
            "offset": c,
        }  # , "freq": f, "period": 1./f, "fitfunc": fitfunc, "maxcov": np.max(pcov), "rawres": (guess,popt,pcov)}
    def fit(self, df):
        """Fits trend for later detrending
        Args:
            df (pandas.DataFrame): input dataframe
        Raises:
            ValueError: if the data cannot be coerced to float.
        """
        try:
            df = df.astype(float)
        except Exception:
            raise ValueError("Data Cannot Be Converted to Numeric Float")
        X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
        self.sin_params = pd.DataFrame()
        # make this faster (250 columns in 2.5 seconds isn't bad, though)
        for column in df.columns:
            try:
                y = df[column].values
                vals = self.fit_sin(X, y)
                current_param = pd.DataFrame(vals, index=[column])
            except Exception as e:
                # Fall back to a flat dummy sinusoid so transform still works.
                print(f"SinTrend failed with {repr(e)}")
                current_param = pd.DataFrame(
                    {"amp": 0, "omega": 1, "phase": 1, "offset": 1}, index=[column]
                )
            self.sin_params = pd.concat([self.sin_params, current_param], axis=0)
        self.shape = df.shape
        return self
    def fit_transform(self, df):
        """Fits and Returns Detrended DataFrame
        Args:
            df (pandas.DataFrame): input dataframe
        """
        self.fit(df)
        return self.transform(df)
    def transform(self, df):
        """Returns detrended data
        Args:
            df (pandas.DataFrame): input dataframe
        """
        try:
            df = df.astype(float)
        except Exception:
            raise ValueError("Data Cannot Be Converted to Numeric Float")
        X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
        sin_df = pd.DataFrame()
        # make this faster
        # Rebuild each column's fitted sinusoid over the current index.
        for index, row in self.sin_params.iterrows():
            yy = pd.DataFrame(
                row['amp'] * np.sin(row['omega'] * X + row['phase']) + row['offset'],
                columns=[index],
            )
            sin_df = pd.concat([sin_df, yy], axis=1)
        # Align by position (reset_index) to avoid index-label mismatches.
        df_index = df.index
        df = df.astype(float).reset_index(drop=True) - sin_df.reset_index(drop=True)
        df.index = df_index
        return df
    def inverse_transform(self, df):
        """Returns data to original form
        Args:
            df (pandas.DataFrame): input dataframe
        """
        try:
            df = df.astype(float)
        except Exception:
            raise ValueError("Data Cannot Be Converted to Numeric Float")
        X = pd.to_numeric(df.index, errors='coerce', downcast='integer').values
        sin_df = pd.DataFrame()
        # make this faster
        for index, row in self.sin_params.iterrows():
            yy = pd.DataFrame(
                row['amp'] * np.sin(row['omega'] * X + row['phase']) + row['offset'],
                columns=[index],
            )
            sin_df = pd.concat([sin_df, yy], axis=1)
        df_index = df.index
        df = df.astype(float).reset_index(drop=True) + sin_df.reset_index(drop=True)
        df.index = df_index
        return df
class PositiveShift(EmptyTransformer):
    """Shift each series if necessary to assure all values >= 1.

    Args:
        log (bool): whether to include a log transform.
        center_one (bool): whether to shift to 1 instead of 0.
        squared (bool): whether to square (**2) values after shift.
    """

    def __init__(
        self, log: bool = False, center_one: bool = True, squared=False, **kwargs
    ):
        super().__init__(name="PositiveShift")
        self.log = log
        self.center_one = center_one
        self.squared = squared

    def fit(self, df):
        """Learn the per-column shift needed to make all values positive.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        floor = df.min(axis=0)
        if self.log or self.center_one:
            # Target minimum of 1 (required for log) instead of 0.
            floor = floor - 1
        # Only columns that actually dip below the target get shifted.
        self.shift_amount = floor.where(floor < 0, 0).abs()
        return self

    def fit_transform(self, df):
        """Fit, then return the shifted data.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        return self.fit(df).transform(df)

    def transform(self, df):
        """Apply shift, then optional square and log.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        shifted = df + self.shift_amount
        if self.squared:
            shifted = shifted ** 2
        if not self.log:
            return shifted
        return pd.DataFrame(np.log(shifted))

    def inverse_transform(self, df):
        """Undo log, square and shift, in reverse order.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        out = pd.DataFrame(np.exp(df)) if self.log else df
        if self.squared:
            out = out ** 0.5
        return out - self.shift_amount
class IntermittentOccurrence(EmptyTransformer):
    """Intermittent inspired binning predicts probability of not center.

    Does not inverse to original values!

    Args:
        center (str): one of "mean", "median", "midhinge"
    """

    def __init__(self, center: str = "median", **kwargs):
        super().__init__(name="IntermittentOccurrence")
        self.center = center

    @staticmethod
    def get_new_params(method: str = 'random'):
        """Generate new random parameters (center choice)."""
        if method == "fast":
            return {"center": "mean"}
        picked = random.choices(
            ["mean", "median", "midhinge"],
            [0.4, 0.3, 0.3],
            k=1,
        )[0]
        return {"center": picked}

    def fit(self, df):
        """Learn the center and mean distances above/below it per column.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        if self.center == "mean":
            center = df.mean(axis=0)
        elif self.center == "midhinge":
            center = (df.quantile(0.75, axis=0) + df.quantile(0.25, axis=0)) / 2
        else:
            center = df.median(axis=0, skipna=True)
        self.df_med = center
        # Average offsets of values strictly above / below the center;
        # NaN (no such values) becomes 0.
        self.upper_mean = df[df > center].mean(axis=0) - center
        self.lower_mean = df[df < center].mean(axis=0) - center
        self.lower_mean.fillna(0, inplace=True)
        self.upper_mean.fillna(0, inplace=True)
        return self

    def fit_transform(self, df):
        """Fit, then return the coded data.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        return self.fit(df).transform(df)

    def transform(self, df):
        """Code values as -1 / 0 / +1 relative to the learned center.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        coded = df.where(df >= self.df_med, -1)
        coded = coded.where(df <= self.df_med, 1)
        return coded.where(df != self.df_med, 0)

    def inverse_transform(self, df):
        """Map the -1/0/+1 coding back to approximate values.

        Args:
            df (pandas.DataFrame): input dataframe
        """
        restored = df.copy()
        # Positive codes become the mean upper offset, negative codes the
        # (negated) mean lower offset, then re-center.
        restored = restored.where(df <= 0, self.upper_mean * df, axis=1)
        restored = restored.where(
            df >= 0, (self.lower_mean * df).abs() * -1, axis=1
        )
        restored = restored + self.df_med
        return restored.where(df != 0, self.df_med, axis=1)
class RollingMeanTransformer(EmptyTransformer):
    """Attempt at Rolling Mean with built-in inverse_transform for time series
    inverse_transform can only be applied to the original series, or an immediately following forecast
    Does not play well with data with NaNs
    Inverse transformed values returned will also not return as 'exactly' equals due to floating point imprecision.
    Args:
        window (int): number of periods to take mean over
        fixed (bool): if True, inverse_transform is skipped (smoothing only)
    """
    def __init__(self, window: int = 10, fixed: bool = False, **kwargs):
        super().__init__(name="RollingMeanTransformer")
        self.window = window
        self.fixed = fixed
    @staticmethod
    def get_new_params(method: str = 'random'):
        """Generate new random parameters (fixed flag and window size)."""
        bool_c = bool(random.getrandbits(1))
        if method == "fast":
            choice = random.choice([3, 7, 10, 12])
        else:
            choice = seasonal_int(include_one=False)
        return {"fixed": bool_c, "window": choice}
    def fit(self, df):
        """Fits.
        Args:
            df (pandas.DataFrame): input dataframe
        """
        self.shape = df.shape
        # Raw head/tail windows are kept as anchors for inverse_transform.
        self.last_values = (
            df.tail(self.window).fillna(method='ffill').fillna(method='bfill')
        )
        self.first_values = (
            df.head(self.window).fillna(method='ffill').fillna(method='bfill')
        )
        # Last rolling-mean value anchors the 'forecast' inversion.
        df = df.tail(self.window + 1).rolling(window=self.window, min_periods=1).mean()
        self.last_rolling = df.tail(1)
        return self
    def transform(self, df):
        """Returns rolling data
        Args:
            df (pandas.DataFrame): input dataframe
        """
        df = df.rolling(window=self.window, min_periods=1).mean()
        # self.last_rolling = df.tail(1)
        return df
    def fit_transform(self, df):
        """Fits and Returns Magical DataFrame
        Args:
            df (pandas.DataFrame): input dataframe
        """
        self.fit(df)
        return self.transform(df)
    def inverse_transform(self, df, trans_method: str = "forecast"):
        """Returns data to original *or* forecast form
        Args:
            df (pandas.DataFrame): input dataframe
            trans_method (str): whether to inverse on original data, or on a following sequence
                - 'original' return original data to original numbers
                - 'forecast' inverse the transform on a dataset immediately following the original
        """
        if self.fixed:
            # Fixed mode: the smoothing is intentionally not inverted.
            return df
        else:
            window = self.window
            if trans_method == 'original':
                # Reconstruct recursively: each raw value equals the scaled
                # difference of consecutive means plus the value `window`
                # steps earlier, seeded from the stored first raw window.
                staged = self.first_values
                diffed = ((df.astype(float) - df.shift(1).astype(float)) * window).tail(
                    len(df.index) - window
                )
                temp_cols = diffed.columns
                for n in range(len(diffed.index)):
                    temp_index = diffed.index[n]
                    temp_row = diffed.iloc[n].reset_index(drop=True) + staged.iloc[
                        n
                    ].reset_index(drop=True).astype(float)
                    temp_row = pd.DataFrame(
                        temp_row.values.reshape(1, len(temp_row)), columns=temp_cols
                    )
                    temp_row.index = pd.DatetimeIndex([temp_index])
                    staged = pd.concat([staged, temp_row], axis=0)
                return staged
            # current_inversed = current * window - cumsum(window-1 to previous)
            if trans_method == 'forecast':
                # Same recursion, but seeded from the stored last raw window
                # and the last training rolling mean, for a forecast that
                # immediately follows the training data.
                staged = self.last_values
                df = pd.concat([self.last_rolling, df], axis=0)
                diffed = ((df.astype(float) - df.shift(1).astype(float)) * window).tail(
                    len(df.index)
                )
                diffed = diffed.tail(len(diffed.index) - 1)
                temp_cols = diffed.columns
                for n in range(len(diffed.index)):
                    temp_index = diffed.index[n]
                    temp_row = diffed.iloc[n].reset_index(drop=True) + staged.iloc[
                        n
                    ].reset_index(drop=True).astype(float)
                    temp_row = pd.DataFrame(
                        temp_row.values.reshape(1, len(temp_row)), columns=temp_cols
                    )
                    temp_row.index = pd.DatetimeIndex([temp_index])
                    staged = pd.concat([staged, temp_row], axis=0)
                staged = staged.tail(len(diffed.index))
                return staged
"""
df = df_wide_numeric.tail(60).head(50).fillna(0)
df_forecast = (df_wide_numeric).tail(10).fillna(0)
forecats = transformed.tail(10)
test = RollingMeanTransformer().fit(df)
transformed = test.transform(df)
inverse = test.inverse_transform(forecats, trans_method = 'forecast')
df == test.inverse_transform(test.transform(df), trans_method = 'original')
inverse == df_wide_numeric.tail(10)
"""
"""
df = df_wide_numeric.tail(60).fillna(0)
test = SeasonalDifference().fit(df)
transformed = test.transform(df)
forecats = transformed.tail(10)
df == test.inverse_transform(transformed, trans_method = 'original')
df = df_wide_numeric.tail(60).head(50).fillna(0)
test = SeasonalDifference().fit(df)
inverse = test.inverse_transform(forecats, trans_method = 'forecast')
inverse == df_wide_numeric.tail(10).fillna(0)
"""
class SeasonalDifference(EmptyTransformer):
"""Remove seasonal component.
Args:
lag_1 (int): length of seasonal period to remove.
method (str): 'LastValue', 'Mean', 'Median' to construct seasonality
"""
def __init__(self, lag_1: int = 7, method: str = 'LastValue', **kwargs):
super().__init__(name="SeasonalDifference")
self.lag_1 = int(abs(lag_1))
self.method = method
@staticmethod
def get_new_params(method: str = 'random'):
method_c = random.choice(['LastValue', 'Mean', "Median"])
if method == "fast":
choice = random.choice([7, 12])
else:
choice = seasonal_int(include_one=False)
return {"lag_1": choice, "method": method_c}
def fit(self, df):
"""Fits.
Args:
df (pandas.DataFrame): input dataframe
"""
df_length = df.shape[0]
if self.method in ['Mean', 'Median']:
df2 = df.copy()
tile_index = np.tile(
| np.arange(self.lag_1) | numpy.arange |
import numpy as np
import time
import sys
import logging
import csv
from h2o4gpu.solvers import TruncatedSVDH2O
print(sys.path)
logging.basicConfig(level=logging.DEBUG)
def func_bench(m=2000, n = 20, k = 5):
    """Benchmark h2o4gpu TruncatedSVD: cusolver vs power method.

    Runs 5 timed fits of each solver on an m x n random matrix with k
    components (after a small warm-up fit per solver) and appends the
    average timings to ``power_cusolver_avg_run.csv``.

    :param m: number of rows of the random test matrix
    :param n: number of columns of the random test matrix
    :param k: number of SVD components to compute
    """
    np.random.seed(1234)
    X = np.random.rand(m,n)
    #Warm start
    # Small warm-up matrix so GPU/solver initialization is excluded
    # from the timed runs below.
    W = np.random.rand(1000,5)
    print('Cusolver Warm Start')
    h2o4gpu_tsvd_cusolver = TruncatedSVDH2O(n_components=3, algorithm="cusolver", random_state=42)
    h2o4gpu_tsvd_cusolver.fit(W)
    print('Power Warm Start')
    h2o4gpu_tsvd_power = TruncatedSVDH2O(n_components=3, algorithm="power", tol = 1e-5, n_iter=100, random_state=42, verbose=True)
    h2o4gpu_tsvd_power.fit(W)
    print("SVD on " + str(X.shape[0]) + " by " + str(X.shape[1]) + " matrix with k=" + str(k))
    print("\n")
    cusolver_sum_time = 0
    power_sum_time = 0
    for i in range(5):
        start_time_cusolver = time.time()
        print("CUSOLVER Bencmark on iteration " + str(i))
        h2o4gpu_tsvd_cusolver.n_components = k
        h2o4gpu_tsvd_cusolver.fit(X)
        end_time_cusolver = time.time() - start_time_cusolver
        cusolver_sum_time +=end_time_cusolver
        print("Took cusolver " + str(end_time_cusolver) + " seconds on iteration " + str(i))
        print("Sleep before Power on iteration " + str(i))
        # Pause between solvers so one run's load does not skew the next timing.
        time.sleep(5)
        start_time_power = time.time()
        print("POWER Bencmark on iteration " + str(i))
        h2o4gpu_tsvd_power.n_components = k
        h2o4gpu_tsvd_power.fit(X)
        end_time_power = time.time() - start_time_power
        power_sum_time += end_time_power
        print("Took power method " + str(end_time_power) + " seconds on iteration " + str(i))
    #Benchmarks
    ########################################################################
    # Append average-of-5 timings per solver to the shared CSV.
    dim = str(m) + "by" + str(n)
    with open('power_cusolver_avg_run.csv', 'a', newline='') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',',
                               quotechar='|', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writerow(['cusolver', str(cusolver_sum_time/5), dim, str(k)])
        csvwriter.writerow(['power', str(power_sum_time/5), dim, str(k)])
    csvfile.close()
    #########################################################################
def func(m=2000, n=20, k=5):
    """Run cusolver and power-method truncated SVD once on a random matrix.

    Prints the timing and fitted attributes of both solvers, then asserts
    that their singular values and explained variances agree.
    """
    np.random.seed(1234)
    data = np.random.rand(m, n)
    print("SVD on " + str(data.shape[0]) + " by " + str(data.shape[1]) + " matrix")
    print("\n")
    t_cusolver = time.time()
    print("CUSOLVER")
    tsvd_cusolver = TruncatedSVDH2O(n_components=k, algorithm="cusolver",
                                    random_state=42)
    tsvd_cusolver.fit(data)
    cusolver_elapsed = time.time() - t_cusolver
    print("Took cusolver " + str(cusolver_elapsed) + " seconds")
    t_power = time.time()
    print("POWER")
    tsvd_power = TruncatedSVDH2O(n_components=k, algorithm="power", tol=1E-50,
                                 n_iter=2000, random_state=42, verbose=True)
    tsvd_power.fit(data)
    power_elapsed = time.time() - t_power
    print("Took power method " + str(power_elapsed) + " seconds")
    # Dump the fitted attributes of both models for visual comparison.
    print("h2o4gpu cusolver components")
    print(tsvd_cusolver.components_)
    print("h2o4gpu cusolver singular values")
    print(tsvd_cusolver.singular_values_)
    print("h2o4gpu tsvd cusolver Explained Variance")
    print(tsvd_cusolver.explained_variance_)
    print("h2o4gpu tsvd cusolver Explained Variance Ratio")
    print(tsvd_cusolver.explained_variance_ratio_)
    print("h2o4gpu power components")
    print(tsvd_power.components_)
    print("h2o4gpu power singular values")
    print(tsvd_power.singular_values_)
    print("h2o4gpu tsvd power Explained Variance")
    print(tsvd_power.explained_variance_)
    print("h2o4gpu tsvd power Explained Variance Ratio")
    print(tsvd_power.explained_variance_ratio_)
    print("Checking singular values")
    # Singular values should agree tightly between the two algorithms.
    assert np.allclose(tsvd_cusolver.singular_values_,
                       tsvd_power.singular_values_, rtol=1E-5)
    print("Checking explained variance")
    # Explained variance is derived; allow a looser tolerance.
    assert np.allclose(tsvd_cusolver.explained_variance_,
                       tsvd_power.explained_variance_, rtol=1E-3)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):

    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: the symmetry operations of the group,
                                each a tuple (rot, tn, td) of integer
                                arrays: rot is the 3x3 rotation matrix,
                                tn/td are the numerator and denominator
                                of the fractional translation vector
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Store each rotation transposed so that symmetry-equivalent Miller
        # indices can be obtained with a single dot product.
        self.transposed_rotations = N.array([N.transpose(rot)
                                             for rot, tn, td in transformations])
        # Per-component phase factors exp(-2*pi*i*tn/td) for each translation.
        self.phase_factors = N.exp(N.array([(-2j * N.pi * tn) / td
                                            for rot, tn, td in transformations]))

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :return: a tuple (miller_indices, phase_factor) of two arrays,
                 one entry per space group transformation.
                 miller_indices holds the Miller indices of each reflection
                 equivalent by symmetry to hkl (hkl itself first);
                 phase_factor holds the factors to apply to the structure
                 factor of hkl to obtain that of each equivalent reflection.
        :rtype: tuple
        """
        equivalents = N.dot(self.transposed_rotations, hkl)
        # Product over the three index components of the per-component
        # phase factors raised to the corresponding Miller index.
        phases = N.multiply.reduce(self.phase_factors ** hkl, -1)
        return equivalents, phases
space_groups = {}
# Each section below rebuilds `transformations` from scratch and registers
# one SpaceGroup under both its number and its Hermann-Mauguin symbol.
# Space group 1 (P 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
# Space group 2 (P -1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
# Space group 3 (P 1 2 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
# Space group 4 (P 1 21 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
# Space group 5 (C 1 2 1): P-cell operations plus C-centering (1/2,1/2,0)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
# Space group 6 (P 1 m 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
# Space group 7 (P 1 c 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
# Space group 8 (C 1 m 1): P-cell operations plus C-centering (1/2,1/2,0)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
# Space group 9 (C 1 c 1): P-cell operations plus C-centering (1/2,1/2,0)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
# Space group 10 (P 1 2/m 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
# Space group 11 (P 1 21/m 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
# Space group 12 (C 1 2/m 1): P-cell operations plus C-centering (1/2,1/2,0)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
# Space group 13 (P 1 2/c 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
# Space group 14 (P 1 21/c 1)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
# Space group 15 (C 1 2/c 1): P-cell operations plus C-centering (1/2,1/2,0)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
# Space group 16 (P 2 2 2)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
# Space group 17 (P 2 2 21)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
# Space group 18 (P 21 21 2)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
# Space group 19 (P 21 21 21)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
# Space group 20 (C 2 2 21): P-cell operations plus C-centering (1/2,1/2,0)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
# Space group 21 (C 2 2 2): P-cell operations plus C-centering (1/2,1/2,0)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
# Space group 22 (F 2 2 2): P-cell operations plus F-centering
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
# Space group 23 (I 2 2 2): P-cell operations plus I-centering (1/2,1/2,1/2)
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
# Space group 24 (I 21 21 21): P-cell operations plus I-centering
# (1/2,1/2,1/2); note the mixed denominators encode e.g. 0,0,1/2 + centering
# as (1/2,1/2,1).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
# Space group 31, Hermann-Mauguin symbol 'P m n 21'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,1], [2,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
# Register under both the space-group number and the symbol.
space_groups[31] = sg
space_groups['P m n 21'] = sg
# Space group 32, Hermann-Mauguin symbol 'P b a 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[32] = sg
space_groups['P b a 2'] = sg
# Space group 33, Hermann-Mauguin symbol 'P n a 21'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
# Register under both the space-group number and the symbol.
space_groups[33] = sg
space_groups['P n a 21'] = sg
# Space group 34, Hermann-Mauguin symbol 'P n n 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[34] = sg
space_groups['P n n 2'] = sg
# Space group 35, Hermann-Mauguin symbol 'C m m 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; the second half repeats the first four
# rotations shifted by the C-centering translation (1/2, 1/2, 0).
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[35] = sg
space_groups['C m m 2'] = sg
# Space group 36, Hermann-Mauguin symbol 'C m c 21'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; ops 5-8 add the C-centering shift.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
# Register under both the space-group number and the symbol.
space_groups[36] = sg
space_groups['C m c 21'] = sg
# Space group 37, Hermann-Mauguin symbol 'C c c 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; ops 5-8 add the C-centering shift.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[37] = sg
space_groups['C c c 2'] = sg
# Space group 38, Hermann-Mauguin symbol 'A m m 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; ops 5-8 add the A-centering shift
# (0, 1/2, 1/2).
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,1,1], [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,1,1], [1,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[38] = sg
space_groups['A m m 2'] = sg
# Space group 39, Hermann-Mauguin symbol 'A b m 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates.  NOTE: ops 7-8 keep the generated,
# unreduced translation (0, 1, 1/2) (equivalent to (0, 0, 1/2) mod 1);
# preserved verbatim from the source table.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,1,0], [1,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,1,0], [1,2,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,1,1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,1,1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[39] = sg
space_groups['A b m 2'] = sg
# Space group 40, Hermann-Mauguin symbol 'A m a 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; ops 5-8 add the A-centering shift.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[40] = sg
space_groups['A m a 2'] = sg
# Space group 41, Hermann-Mauguin symbol 'A b a 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; ops 5-8 add the A-centering shift.
# NOTE: ops 7-8 keep the generated, unreduced translation (1/2, 1, 1/2);
# preserved verbatim from the source table.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,0], [2,2,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[41] = sg
space_groups['A b a 2'] = sg
# Space group 42, Hermann-Mauguin symbol 'F m m 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates.  The four point-group rotations are
# repeated for each F-centering translation: (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0).
transformations = []
_rots = [
    [1,0,0, 0,1,0, 0,0,1],
    [-1,0,0, 0,-1,0, 0,0,1],
    [-1,0,0, 0,1,0, 0,0,1],
    [1,0,0, 0,-1,0, 0,0,1],
]
for num, den in [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]:
    for rot_elems in _rots:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[42] = sg
space_groups['F m m 2'] = sg
# Space group 43, Hermann-Mauguin symbol 'F d d 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; the pattern of four operations is
# repeated for each F-centering translation, with the d-glide quarters
# added for the mirror-type rotations.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [4,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [4,4,4]),
    ([1,0,0, 0,1,0, 0,0,1],   [0,1,1], [1,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,3,3], [4,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,3,3], [4,4,4]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,0,1], [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [3,1,3], [4,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],  [3,1,3], [4,4,4]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,0], [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [3,3,1], [4,4,4]),
    ([1,0,0, 0,-1,0, 0,0,1],  [3,3,1], [4,4,4]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[43] = sg
space_groups['F d d 2'] = sg
# Space group 44, Hermann-Mauguin symbol 'I m m 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; ops 5-8 add the body-centering shift
# (1/2, 1/2, 1/2).
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[44] = sg
space_groups['I m m 2'] = sg
# Space group 45, Hermann-Mauguin symbol 'I b a 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; ops 5-8 add the body-centering shift.
# NOTE: ops 7-8 keep the generated, unreduced translation (1/2, 1/2, 1);
# preserved verbatim from the source table.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [2,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[45] = sg
space_groups['I b a 2'] = sg
# Space group 46, Hermann-Mauguin symbol 'I m a 2'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; ops 5-8 add the body-centering shift.
# NOTE: ops 7-8 keep the generated, unreduced translation (1, 1/2, 1/2);
# preserved verbatim from the source table.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],   [0,0,0], [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,0,0], [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,1],   [1,1,1], [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0, 0,1,0, 0,0,1],  [1,1,1], [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],  [1,1,1], [1,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
# Register under both the space-group number and the symbol.
space_groups[46] = sg
space_groups['I m a 2'] = sg
# Space group 47, Hermann-Mauguin symbol 'P m m m'.
# All eight operations are pure rotations/reflections (zero translation):
# identity, the three two-fold axes, inversion, and the three mirrors.
transformations = []
for rot_elems in [
    [1,0,0, 0,1,0, 0,0,1],
    [1,0,0, 0,-1,0, 0,0,-1],
    [-1,0,0, 0,1,0, 0,0,-1],
    [-1,0,0, 0,-1,0, 0,0,1],
    [-1,0,0, 0,-1,0, 0,0,-1],
    [-1,0,0, 0,1,0, 0,0,1],
    [1,0,0, 0,-1,0, 0,0,1],
    [1,0,0, 0,1,0, 0,0,-1],
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
# Register under both the space-group number and the symbol.
space_groups[47] = sg
space_groups['P m m m'] = sg
# Space group 48, Hermann-Mauguin symbol 'P n n n :2' (origin choice 2).
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; negative numerators are kept exactly
# as generated.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,1,1],    [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],    [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,-1,-1],  [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0],  [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
# Register under both the space-group number and the symbol.
space_groups[48] = sg
space_groups['P n n n :2'] = sg
# Space group 49, Hermann-Mauguin symbol 'P c c m'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; negative numerators are kept exactly
# as generated.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,1],   [1,1,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,1],   [1,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,-1],  [1,1,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,-1],  [1,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [0,0,0],   [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
# Register under both the space-group number and the symbol.
space_groups[49] = sg
space_groups['P c c m'] = sg
# Space group 50, Hermann-Mauguin symbol 'P b a n :2' (origin choice 2).
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; negative numerators are kept exactly
# as generated.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],   [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,1,0],   [1,2,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,0],   [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,1,0],   [2,2,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,-1,0],  [1,2,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,0],  [2,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,-1,0], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
# Register under both the space-group number and the symbol.
space_groups[50] = sg
space_groups['P b a n :2'] = sg
# Space group 51, Hermann-Mauguin symbol 'P m m a'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; negative numerators are kept exactly
# as generated.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [1,0,0],  [2,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,0],  [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [-1,0,0], [2,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,0], [2,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
# Register under both the space-group number and the symbol.
space_groups[51] = sg
space_groups['P m m a'] = sg
# Space group 52, Hermann-Mauguin symbol 'P n n a'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; negative numerators are kept exactly
# as generated.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],     [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,1,1],     [1,2,2]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,1,1],     [2,2,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,0],     [2,1,1]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],     [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,-1,-1],   [1,2,2]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,-1,-1],  [2,2,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,0],    [2,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
# Register under both the space-group number and the symbol.
space_groups[52] = sg
space_groups['P n n a'] = sg
# Space group 53, Hermann-Mauguin symbol 'P m n a'.
# Operations are (rotation, translation numerator, translation denominator)
# triples on fractional coordinates; negative numerators are kept exactly
# as generated.
transformations = []
for rot_elems, num, den in [
    ([1,0,0, 0,1,0, 0,0,1],    [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,-1],  [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,-1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,1],  [1,0,1],    [2,1,2]),
    ([-1,0,0, 0,-1,0, 0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0, 0,1,0, 0,0,1],   [0,0,0],    [1,1,1]),
    ([1,0,0, 0,-1,0, 0,0,1],   [-1,0,-1],  [2,1,2]),
    ([1,0,0, 0,1,0, 0,0,-1],   [-1,0,-1],  [2,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
# Register under both the space-group number and the symbol.
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
# Space group 69 (F m m m): the eight mmm point-group rotations replicated
# over the four F-lattice centering translations.  Operation order matches
# the original generated table: all eight rotations at each centering in turn.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([0,1,1], [1,2,2]),
                   ([1,0,1], [2,1,2]), ([1,1,0], [2,2,1])]:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
# Space group 70 (F d d d, origin choice 2): symmetry operations tabulated as
# (rotation, translation numerator, translation denominator) triples, listed
# in the same order as the original generated table.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [4,4,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,3,3], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,3,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,1,1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,0,3], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,1,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,-1,1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,-1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,3,1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,1,1], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,3,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [4,4,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
# Space group 71 (I m m m): the eight mmm point-group rotations replicated
# over the two I-lattice centering translations, in table order.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
# Space group 72 (I b a m): symmetry operations tabulated as (rotation,
# translation numerator, translation denominator) triples, in table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
# Space group 73 (I b c a): symmetry operations tabulated as (rotation,
# translation numerator, translation denominator) triples, in table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
# Space group 74 (I m m a): symmetry operations tabulated as (rotation,
# translation numerator, translation denominator) triples, in table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
# Space group 75 (P 4): four pure rotations about the 4-fold axis, all with
# zero translation.
transformations = []
for _r in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
# Space group 76 (P 41): 4-fold screw axis; each rotation carries its own
# fractional translation along c, in table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
# Space group 77 (P 42): 4-fold screw axis (translation c/2 on the 4-fold
# operations), in table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
# Space group 78 (P 43): 4-fold screw axis; each rotation carries its own
# fractional translation along c, in table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,3], [1,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
# Space group 79 (I 4): the four 4-fold rotations replicated over the two
# I-lattice centering translations, in table order.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
# Space group 80 (I 41): symmetry operations tabulated as (rotation,
# translation numerator, translation denominator) triples, in table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
# Space group 81 (P -4): the four -4 rotoinversion operations, all with zero
# translation.
transformations = []
for _r in [
    [1,0,0,0,1,0,0,0,1],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
# Space group 82 (I -4): the four -4 rotoinversion operations replicated over
# the two I-lattice centering translations, in table order.
transformations = []
_rots = [
    [1,0,0,0,1,0,0,0,1],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])]:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
# Space group 83 (P 4/m): eight point-group operations (4-fold rotations and
# their inversion images), all with zero translation.
transformations = []
for _r in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
# Space group 84 (P 42/m): symmetry operations tabulated as (rotation,
# translation numerator, translation denominator) triples, in table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
# Space group 85 (P 4/n, origin choice 2): symmetry operations tabulated as
# (rotation, translation numerator, translation denominator) triples, in
# table order.
transformations = []
for _r, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
# Space group 86 (P 42/n :2): each entry is (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to a 3x3 matrix.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0),    (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (0,1,1),    (1,2,2)),
    ((0,1,0,-1,0,0,0,0,1),    (1,0,1),    (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1),   (1,1,0),    (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1),  (0,0,0),    (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1),   (0,-1,-1),  (1,2,2)),
    ((0,-1,0,1,0,0,0,0,-1),   (-1,0,-1),  (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1),    (-1,-1,0),  (2,2,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
# Space group 87 (I 4/m): the eight point-group rotations appear twice, first
# with zero translation and then shifted by the body-centring vector
# (1/2, 1/2, 1/2).  Entries are (rotation elements, numerator, denominator).
transformations = []
_rots_87 = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (1,0,0,0,1,0,0,0,-1),
]
for num, den in [((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))]:
    for rot_elems in _rots_87:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
del _rots_87
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
# Space group 88 (I 41/a :2): sixteen symmetry operations, listed verbatim as
# (rotation elements, translation numerator, translation denominator).
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0),     (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (1,3,3),     (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1),    (1,1,1),     (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,1,0),     (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1),  (0,0,0),     (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1),   (-1,-3,-3),  (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1),   (-1,-1,-1),  (4,4,4)),
    ((1,0,0,0,1,0,0,0,-1),    (0,-1,0),    (1,2,1)),
    ((1,0,0,0,1,0,0,0,1),     (1,1,1),     (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1),    (3,5,5),     (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1),    (3,3,3),     (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,1),   (1,1,1),     (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1),  (1,1,1),     (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1),   (1,-1,-1),   (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1),   (1,1,1),     (4,4,4)),
    ((1,0,0,0,1,0,0,0,-1),    (1,0,1),     (2,1,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
# Space group 89 (P 4 2 2): eight rotations, all with zero translation.
# Entries are (rotation elements, translation numerator, denominator).
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1),    (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
# Space group 90 (P 4 21 2): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (1,1,0), (2,2,1)),
    ((0,1,0,-1,0,0,0,0,1),    (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,-1),   (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1),   (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
# Space group 91 (P 41 2 2): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (0,0,1), (1,1,4)),
    ((0,1,0,-1,0,0,0,0,1),    (0,0,3), (1,1,4)),
    ((1,0,0,0,-1,0,0,0,-1),   (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,3), (1,1,4)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,1), (1,1,4)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
# Space group 92 (P 41 21 2): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (1,1,1), (2,2,4)),
    ((0,1,0,-1,0,0,0,0,1),    (1,1,3), (2,2,4)),
    ((1,0,0,0,-1,0,0,0,-1),   (1,1,3), (2,2,4)),
    ((-1,0,0,0,1,0,0,0,-1),   (1,1,1), (2,2,4)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,1), (1,1,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
# Space group 93 (P 42 2 2): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1),    (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,1), (1,1,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
# Space group 94 (P 42 21 2): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1),    (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1),   (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1),   (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
# Space group 95 (P 43 2 2): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (0,0,3), (1,1,4)),
    ((0,1,0,-1,0,0,0,0,1),    (0,0,1), (1,1,4)),
    ((1,0,0,0,-1,0,0,0,-1),   (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,1), (1,1,4)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,3), (1,1,4)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
# Space group 96 (P 43 21 2): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (1,1,3), (2,2,4)),
    ((0,1,0,-1,0,0,0,0,1),    (1,1,1), (2,2,4)),
    ((1,0,0,0,-1,0,0,0,-1),   (1,1,1), (2,2,4)),
    ((-1,0,0,0,1,0,0,0,-1),   (1,1,3), (2,2,4)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,1), (1,1,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
# Space group 97 (I 4 2 2): the eight rotations appear twice, first with zero
# translation and then shifted by the body-centring vector (1/2, 1/2, 1/2).
transformations = []
_rots_97 = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
]
for num, den in [((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))]:
    for rot_elems in _rots_97:
        rot = N.array(rot_elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
del _rots_97
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
# Space group 98 (I 41 2 2): sixteen symmetry operations, listed verbatim as
# (rotation elements, translation numerator, translation denominator).
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),     (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),    (1,0,3), (2,1,4)),
    ((0,1,0,-1,0,0,0,0,1),    (1,0,3), (2,1,4)),
    ((1,0,0,0,-1,0,0,0,-1),   (1,0,3), (2,1,4)),
    ((-1,0,0,0,1,0,0,0,-1),   (1,0,3), (2,1,4)),
    ((-1,0,0,0,-1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),    (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1),  (0,0,0), (1,1,1)),
    ((1,0,0,0,1,0,0,0,1),     (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1),    (1,1,5), (1,2,4)),
    ((0,1,0,-1,0,0,0,0,1),    (1,1,5), (1,2,4)),
    ((1,0,0,0,-1,0,0,0,-1),   (1,1,5), (1,2,4)),
    ((-1,0,0,0,1,0,0,0,-1),   (1,1,5), (1,2,4)),
    ((-1,0,0,0,-1,0,0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,-1),    (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1),  (1,1,1), (2,2,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
# Space group 99 (P 4 m m): eight rotations, all with zero translation.
# Entries are (rotation elements, translation numerator, denominator).
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1),    (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
# Space group 100 (P 4 b m): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),   (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1),   (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1),  (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,1),    (1,1,0), (2,2,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
# Space group 101 (P 42 c m): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (0,0,1), (1,1,2)),
    ((0,1,0,-1,0,0,0,0,1),   (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),   (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1),   (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1),    (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
# Space group 102 (P 42 n m): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),   (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1),   (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1),  (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1),    (0,0,0), (1,1,1)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
# Space group 103 (P 4 c c): entries are (rotation elements, translation
# numerator, translation denominator); rotation is reshaped to 3x3.
transformations = []
for rot_elems, num, den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1),   (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),   (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1),   (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1),  (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1),    (0,0,1), (1,1,2)),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
# Space group 104 (P 4 n c).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[104] = sg
space_groups['P 4 n c'] = sg
# Space group 105 (P 42 m c).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[105] = sg
space_groups['P 42 m c'] = sg
# Space group 106 (P 42 b c).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[106] = sg
space_groups['P 42 b c'] = sg
# Space group 107 (I 4 m m).
# Body-centred group: the 8 point operations appear twice, once with no
# translation and once shifted by the centring vector (1/2, 1/2, 1/2).
_rotations = [
    (1, 0, 0, 0, 1, 0, 0, 0, 1),
    (0, -1, 0, 1, 0, 0, 0, 0, 1),
    (0, 1, 0, -1, 0, 0, 0, 0, 1),
    (-1, 0, 0, 0, -1, 0, 0, 0, 1),
    (-1, 0, 0, 0, 1, 0, 0, 0, 1),
    (1, 0, 0, 0, -1, 0, 0, 0, 1),
    (0, -1, 0, -1, 0, 0, 0, 0, 1),
    (0, 1, 0, 1, 0, 0, 0, 0, 1),
]
transformations = []
for num, den in (((0, 0, 0), (1, 1, 1)), ((1, 1, 1), (2, 2, 2))):
    for elems in _rotations:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[107] = sg
space_groups['I 4 m m'] = sg
# Space group 108 (I 4 c m).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    # Body-centred copies, shifted by (1/2, 1/2, 1/2).
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[108] = sg
space_groups['I 4 c m'] = sg
# Space group 109 (I 41 m d).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (1, 0, 3), (2, 1, 4)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (1, 0, 3), (2, 1, 4)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 0, 3), (2, 1, 4)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 0, 3), (2, 1, 4)),
    # Body-centred copies, shifted by (1/2, 1/2, 1/2).
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 5), (1, 2, 4)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 5), (1, 2, 4)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 5), (1, 2, 4)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 5), (1, 2, 4)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[109] = sg
space_groups['I 41 m d'] = sg
# Space group 110 (I 41 c d).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (1, 0, 3), (2, 1, 4)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (1, 0, 3), (2, 1, 4)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 0, 1), (2, 1, 4)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 0, 1), (2, 1, 4)),
    # Body-centred copies, shifted by (1/2, 1/2, 1/2).
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 5), (1, 2, 4)),
    ((0, 1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 5), (1, 2, 4)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 3), (1, 2, 4)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 3), (1, 2, 4)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[110] = sg
space_groups['I 41 c d'] = sg
# Space group 111 (P -4 2 m).
# All 8 operations are pure point operations (zero translation).
transformations = []
for elems in [
    (1, 0, 0, 0, 1, 0, 0, 0, 1),
    (0, 1, 0, -1, 0, 0, 0, 0, -1),
    (0, -1, 0, 1, 0, 0, 0, 0, -1),
    (1, 0, 0, 0, -1, 0, 0, 0, -1),
    (-1, 0, 0, 0, 1, 0, 0, 0, -1),
    (-1, 0, 0, 0, -1, 0, 0, 0, 1),
    (0, -1, 0, -1, 0, 0, 0, 0, 1),
    (0, 1, 0, 1, 0, 0, 0, 0, 1),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array((0, 0, 0))
    trans_den = N.array((1, 1, 1))
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
# Space group 112 (P -4 2 c).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
# Space group 113 (P -4 21 m).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, -1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
# Space group 114 (P -4 21 c).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
# Space group 115 (P -4 m 2).
# All 8 operations are pure point operations (zero translation).
transformations = []
for elems in [
    (1, 0, 0, 0, 1, 0, 0, 0, 1),
    (0, 1, 0, -1, 0, 0, 0, 0, -1),
    (0, -1, 0, 1, 0, 0, 0, 0, -1),
    (-1, 0, 0, 0, -1, 0, 0, 0, 1),
    (0, 1, 0, 1, 0, 0, 0, 0, -1),
    (0, -1, 0, -1, 0, 0, 0, 0, -1),
    (-1, 0, 0, 0, 1, 0, 0, 0, 1),
    (1, 0, 0, 0, -1, 0, 0, 0, 1),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array((0, 0, 0))
    trans_den = N.array((1, 1, 1))
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
# Space group 116 (P -4 c 2).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
# Space group 117 (P -4 b 2).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 1, 0, 0, 0, 0, -1), (1, 1, 0), (2, 2, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, -1), (1, 1, 0), (2, 2, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 0), (2, 2, 1)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
# Space group 118 (P -4 n 2).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
# Space group 119 (I -4 m 2).
# Body-centred group: the 8 point operations appear twice, once with no
# translation and once shifted by the centring vector (1/2, 1/2, 1/2).
_rotations = [
    (1, 0, 0, 0, 1, 0, 0, 0, 1),
    (0, 1, 0, -1, 0, 0, 0, 0, -1),
    (0, -1, 0, 1, 0, 0, 0, 0, -1),
    (-1, 0, 0, 0, -1, 0, 0, 0, 1),
    (0, 1, 0, 1, 0, 0, 0, 0, -1),
    (0, -1, 0, -1, 0, 0, 0, 0, -1),
    (-1, 0, 0, 0, 1, 0, 0, 0, 1),
    (1, 0, 0, 0, -1, 0, 0, 0, 1),
]
transformations = []
for num, den in (((0, 0, 0), (1, 1, 1)), ((1, 1, 1), (2, 2, 2))):
    for elems in _rotations:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
# Space group 120 (I -4 c 2).
# Symmetry operations as (rotation, translation numerator, translation
# denominator) triples; the fractional translation is num/den.
transformations = []
for rot, trans_num, trans_den in [
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 0), (1, 1, 1)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 0), (1, 1, 1)),
    ((0, 1, 0, 1, 0, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
    ((0, -1, 0, -1, 0, 0, 0, 0, -1), (0, 0, 1), (1, 1, 2)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (0, 0, 1), (1, 1, 2)),
    # Body-centred copies, shifted by (1/2, 1/2, 1/2).
    ((1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, 1, 0, -1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((0, -1, 0, 1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 2)),
    ((-1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 2)),
    ((0, 1, 0, 1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 1)),
    ((0, -1, 0, -1, 0, 0, 0, 0, -1), (1, 1, 1), (2, 2, 1)),
    ((-1, 0, 0, 0, 1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
    ((1, 0, 0, 0, -1, 0, 0, 0, 1), (1, 1, 1), (2, 2, 1)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
# Space group 121 (I -4 2 m): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
# Space group 122 (I -4 2 d): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
# Space group 123 (P 4/m m m): symmetry operations; every translation is
# zero, so only the rotation part varies between operations.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
# Space group 124 (P 4/m c c): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
# Space group 125 (P 4/n b m :2, origin choice 2): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
# Space group 126 (P 4/n n c :2, origin choice 2): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
# Space group 127 (P 4/m b m): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
# Space group 128 (P 4/m n c): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
# Space group 129 (P 4/n m m :2, origin choice 2): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
# Space group 130 (P 4/n c c :2, origin choice 2): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
# Space group 131 (P 42/m m c): symmetry operations as
# (rotation matrix, translation numerator, translation denominator) triples.
transformations = []
for _rot9, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(_rot9)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
# Space group 132 (P 42/m c m).
# Each symmetry operation is (3x3 rotation matrix, translation numerator,
# translation denominator); the actual translation is num/den per axis.
# Built data-driven instead of one unrolled statement triple per operation;
# the append order matches the original generated code exactly, and
# rot/trans_num/trans_den stay bound after the loop as before.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],  [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
# Space group 133 (P 42/n b c, origin choice 2).
# Each symmetry operation is (3x3 rotation matrix, translation numerator,
# translation denominator); the actual translation is num/den per axis.
# Data-driven rewrite of the original unrolled generated code; the append
# order is preserved, and rot/trans_num/trans_den stay bound after the loop.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,1],    [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,1],    [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,0],    [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],    [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,-1],  [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,-1],  [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,0],   [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1],   [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
# Space group 134 (P 42/n n m, origin choice 2).
# Each symmetry operation is (3x3 rotation matrix, translation numerator,
# translation denominator); the actual translation is num/den per axis.
# Data-driven rewrite of the original unrolled generated code; the append
# order is preserved, and rot/trans_num/trans_den stay bound after the loop.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,1],   [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,1],   [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0],   [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0],   [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0],   [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,0], [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
# Space group 135 (P 42/m b c).
# Each symmetry operation is (3x3 rotation matrix, translation numerator,
# translation denominator); the actual translation is num/den per axis.
# Data-driven rewrite of the original unrolled generated code; the append
# order is preserved, and rot/trans_num/trans_den stay bound after the loop.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,1],    [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,1],    [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,0],    [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,0],    [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,1],    [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,-1],   [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,-1],   [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,-1,0],  [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],    [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
# Space group 136 (P 42/m n m).
# Each symmetry operation is (3x3 rotation matrix, translation numerator,
# translation denominator); the actual translation is num/den per axis.
# Data-driven rewrite of the original unrolled generated code; the append
# order is preserved, and rot/trans_num/trans_den stay bound after the loop.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,1],    [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1],    [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],    [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,-1,-1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [-1,-1,-1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1],   [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],    [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,0],    [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],    [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
# Space group 137 (P 42/n m c, origin choice 2).
# Each symmetry operation is (3x3 rotation matrix, translation numerator,
# translation denominator); the actual translation is num/den per axis.
# Data-driven rewrite of the original unrolled generated code; the append
# order is preserved, and rot/trans_num/trans_den stay bound after the loop.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],    [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,1],    [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,1],    [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,0],    [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,0],    [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],    [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,1],    [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],    [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],    [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,-1],  [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,-1],  [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,0],   [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,0],   [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1],   [1,1,2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
# Space group 138 (P 42/n c m, origin choice 2).
# Each symmetry operation is (3x3 rotation matrix, translation numerator,
# translation denominator); the actual translation is num/den per axis.
# Data-driven rewrite of the original unrolled generated code; the append
# order is preserved, and rot/trans_num/trans_den stay bound after the loop.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],   [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [1,0,1],   [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1],   [0,1,1],   [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,0,1],   [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,1,1],   [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,0],   [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,0],   [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],   [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1],   [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,0],   [1,1,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
# Space group 139 (I 4/m m m), body-centred tetragonal.
# The sixteen point-group rotations are each applied twice: once with zero
# translation and once with the (1/2, 1/2, 1/2) body-centring translation.
# The nested loops reproduce the original generated append order exactly
# (all zero-translation operations first, then all centred ones), and
# rot/trans_num/trans_den stay bound after the loop as before.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,0,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
]
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _rot in _rotations:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
# Space group 140 (I 4/m c m), body-centred tetragonal.
# Each symmetry operation is (3x3 rotation matrix, translation numerator,
# translation denominator); the actual translation is num/den per axis.
# First the sixteen primitive operations, then the sixteen body-centred
# copies, in the original generated order.  Some centred translations are
# left unnormalized (e.g. numerator 1 over denominator 1 in z), exactly as
# in the original data; rot/trans_num/trans_den stay bound after the loop.
transformations = []
for _rot, _num, _den in [
    ([1,0,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],  [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0],  [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],  [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1],    [1,1,1],  [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1],   [1,1,1],  [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1],   [1,1,1],  [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1],  [1,1,1],  [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1],  [1,1,1],  [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1],   [1,1,1],  [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1],  [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1],  [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1],  [1,1,1],  [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1],  [1,1,1],  [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1],   [1,1,0],  [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1],   [1,1,1],  [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1],  [1,1,0],  [2,2,1]),
    ([0,1,0,1,0,0,0,0,1],    [1,1,0],  [2,2,1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
# Symmetry operations for space group 142 ('I 41/a c d :2').
# Each entry is (rot, trans_num, trans_den): a 3x3 integer rotation matrix
# plus integer translation numerators/denominators (presumably the fractional
# translation is trans_num/trans_den component-wise — matches usage elsewhere
# in this generated table).
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,3], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,3,3], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-3], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-3,-3], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,1], [3,3,5], [4,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [3,5,5], [4,4,4]),
    ([0,-1,0,-1,0,0,0,0,-1], [3,3,3], [4,4,4]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,-1,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,-1], [4,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,-1,-1], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [4,4,4]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
# Symmetry operations for space group 143 ('P 3'): three rotation matrices,
# all with zero translation. Entries are (rot, trans_num, trans_den) triples.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
# Symmetry operations for space group 144 ('P 31'): same rotations as 'P 3'
# but with c-axis translations of 1/3 and 2/3 on the non-identity operations.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
# Symmetry operations for space group 145 ('P 32'): same rotations as 'P 31'
# with the 1/3 and 2/3 c-axis translations swapped between the two operations.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
# Symmetry operations for space group 146 ('R 3 :H'): three rotations repeated
# for each of the three rhombohedral centering translations (0, (1/3,2/3,2/3),
# (2/3,1/3,1/3)). Append order is translation-major, matching the original
# generated listing.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for _elems in _rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
# Symmetry operations for space group 147 ('P -3'): six rotation matrices
# (the three of 'P 3' and their negated counterparts), all zero translation.
transformations = []
for _elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
# Symmetry operations for space group 148 ('R -3 :H'): the six rotations of
# 'P -3' repeated for each of the three rhombohedral centering translations.
# Append order is translation-major, matching the original generated listing.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for _elems in _rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
# Symmetry operations for space group 149 ('P 3 1 2'): six rotation matrices,
# all with zero translation.
transformations = []
for _elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
# Symmetry operations for space group 150 ('P 3 2 1'): six rotation matrices,
# all with zero translation.
transformations = []
for _elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
# Symmetry operations for space group 151 ('P 31 1 2'): the rotations of
# 'P 3 1 2' combined with c-axis translations of 0, 1/3 or 2/3.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
# Symmetry operations for space group 152 ('P 31 2 1'): the rotations of
# 'P 3 2 1' combined with c-axis translations of 0, 1/3 or 2/3.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
# Symmetry operations for space group 153 ('P 32 1 2'): like 'P 31 1 2' with
# the 1/3 and 2/3 c-axis translations swapped.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
# Symmetry operations for space group 154 ('P 32 2 1'): like 'P 31 2 1' with
# the 1/3 and 2/3 c-axis translations swapped.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
# Symmetry operations for space group 155 ('R 3 2 :H'): the six rotations of
# 'P 3 2 1' repeated for each of the three rhombohedral centering translations.
# Append order is translation-major, matching the original generated listing.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for _elems in _rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
# Symmetry operations for space group 156 ('P 3 m 1'): six rotation matrices,
# all with zero translation.
transformations = []
for _elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
# Symmetry operations for space group 157 ('P 3 1 m'): six rotation matrices,
# all with zero translation.
transformations = []
for _elems in [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
# Symmetry operations for space group 158 ('P 3 c 1'): the rotations of
# 'P 3 m 1' with a c/2 translation on the last three operations.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
# Symmetry operations for space group 159 ('P 3 1 c'): the rotations of
# 'P 3 1 m' with a c/2 translation on the last three operations.
transformations = []
for _elems, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
# Symmetry operations for space group 160 ('R 3 m :H'): the six rotations of
# 'P 3 m 1' repeated for each of the three rhombohedral centering translations.
# Append order is translation-major, matching the original generated listing.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for _elems in _rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
# Space group 161, 'R 3 c :H'.  Six point operations; the second triple
# carries an extra z translation relative to the first.  The whole set
# recurs once per centring vector, so tabulate the two translations per
# centring and loop instead of repeating 18 five-line stanzas.
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), 0),
    ((0,-1,0,1,-1,0,0,0,1), 0),
    ((-1,1,0,-1,0,0,0,0,1), 0),
    ((-1,1,0,0,1,0,0,0,1), 1),
    ((1,0,0,1,-1,0,0,0,1), 1),
    ((0,-1,0,-1,0,0,0,0,1), 1),
]
# (plain translation, shifted translation) for each centring vector.
_trans = [
    (((0,0,0), (1,1,1)), ((0,0,1), (1,1,2))),
    (((1,2,2), (3,3,3)), ((1,2,7), (3,3,6))),
    (((2,1,1), (3,3,3)), ((2,1,5), (3,3,6))),
]
for _plain, _shifted in _trans:
    for _r, _use_shift in _ops:
        _num, _den = _shifted if _use_shift else _plain
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
# Space group 162, 'P -3 1 m'.  Twelve point operations, all with zero
# translation, so only the rotation matrices need tabulating.
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,-1),
    (-1,1,0,0,1,0,0,0,-1),
    (1,0,0,1,-1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,1,0,0,0,-1),
    (1,-1,0,1,0,0,0,0,-1),
    (0,1,0,1,0,0,0,0,1),
    (1,-1,0,0,-1,0,0,0,1),
    (-1,0,0,-1,1,0,0,0,1),
]
for _r in _rots:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
# Space group 163, 'P -3 1 c'.  Twelve operations, each tabulated as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,1,0,0,1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((1,0,0,1,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((1,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,-1,0,0,-1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((-1,0,0,-1,1,0,0,0,1), (0,0,-1), (1,1,2)),
]
for _r, _num, _den in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
# Space group 164, 'P -3 m 1'.  Twelve point operations, all with zero
# translation, so only the rotation matrices need tabulating.
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (1,-1,0,0,-1,0,0,0,-1),
    (-1,0,0,-1,1,0,0,0,-1),
    (0,1,0,1,0,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,1,0,0,0,-1),
    (1,-1,0,1,0,0,0,0,-1),
    (-1,1,0,0,1,0,0,0,1),
    (1,0,0,1,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
]
for _r in _rots:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
# Space group 165, 'P -3 c 1'.  Twelve operations, each tabulated as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,-1,1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((1,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,1,0,0,1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,1,-1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,-1), (1,1,2)),
]
for _r, _num, _den in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
# Space group 166, 'R -3 m :H'.  The original generated code spelled
# out 36 operations; they are the same twelve point operations (all
# with zero intrinsic translation) repeated for each of the three
# centring vectors, so build them in a nested loop.
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (1,-1,0,0,-1,0,0,0,-1),
    (-1,0,0,-1,1,0,0,0,-1),
    (0,1,0,1,0,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,1,0,0,0,-1),
    (1,-1,0,1,0,0,0,0,-1),
    (-1,1,0,0,1,0,0,0,1),
    (1,0,0,1,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
]
_centres = [((0,0,0), (1,1,1)), ((1,2,2), (3,3,3)), ((2,1,1), (3,3,3))]
for _num, _den in _centres:
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
# Space group 167, 'R -3 c :H'.  Thirty-six operations: the same twelve
# rotation matrices recur for each centring vector, with the twelve
# split into four consecutive triples that each carry their own
# translation.  Tabulate the four translations per centring and loop.
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (1,-1,0,0,-1,0,0,0,-1),
    (-1,0,0,-1,1,0,0,0,-1),
    (0,1,0,1,0,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,1,0,0,0,-1),
    (1,-1,0,1,0,0,0,0,-1),
    (-1,1,0,0,1,0,0,0,1),
    (1,0,0,1,-1,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,1),
]
# One entry per centring: translations for rotation triples
# [0:3], [3:6], [6:9], [9:12] respectively.
_trans = [
    (((0,0,0), (1,1,1)), ((0,0,1), (1,1,2)),
     ((0,0,0), (1,1,1)), ((0,0,-1), (1,1,2))),
    (((1,2,2), (3,3,3)), ((1,2,7), (3,3,6)),
     ((1,2,2), (3,3,3)), ((1,2,1), (3,3,6))),
    (((2,1,1), (3,3,3)), ((2,1,5), (3,3,6)),
     ((2,1,1), (3,3,3)), ((2,1,-1), (3,3,6))),
]
for _triples in _trans:
    for _i, _r in enumerate(_rots):
        _num, _den = _triples[_i // 3]
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
# Space group 168, 'P 6'.  Six point operations, all with zero
# translation, so only the rotation matrices need tabulating.
transformations = []
_rots = [
    (1,0,0,0,1,0,0,0,1),
    (1,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,1),
]
for _r in _rots:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
# Space group 169, 'P 61'.  Six operations, each tabulated as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,6)),
    ((0,1,0,-1,1,0,0,0,1), (0,0,5), (1,1,6)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,1), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,2), (1,1,3)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
]
for _r, _num, _den in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
# Space group 170, 'P 65'.  Six operations, each tabulated as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0,1,0,0,0,0,1), (0,0,5), (1,1,6)),
    ((0,1,0,-1,1,0,0,0,1), (0,0,1), (1,1,6)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,2), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,3)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
]
for _r, _num, _den in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
# Space group 171, 'P 62'.  Six operations, each tabulated as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,3)),
    ((0,1,0,-1,1,0,0,0,1), (0,0,2), (1,1,3)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,2), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,3)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
]
for _r, _num, _den in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
# Space group 172, 'P 64'.  Six operations, each tabulated as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0,1,0,0,0,0,1), (0,0,2), (1,1,3)),
    ((0,1,0,-1,1,0,0,0,1), (0,0,1), (1,1,3)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,1), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,2), (1,1,3)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
]
for _r, _num, _den in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
# Space group 173, 'P 63'.  Six operations, each tabulated as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
_ops = [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,-1,0,1,0,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,1,0,-1,1,0,0,0,1), (0,0,1), (1,1,2)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
]
for _r, _num, _den in _ops:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
# Space group 174 (P -6).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(174, 'P -6', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[174] = sg
space_groups['P -6'] = sg
# Space group 175 (P 6/m).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(175, 'P 6/m', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[175] = sg
space_groups['P 6/m'] = sg
# Space group 176 (P 63/m).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(176, 'P 63/m', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[176] = sg
space_groups['P 63/m'] = sg
# Space group 177 (P 6 2 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
# Space group 178 (P 61 2 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,5], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,6]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
# Space group 179 (P 65 2 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,5], [1,1,6]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
# Space group 180 (P 62 2 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,3]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
# Space group 181 (P 64 2 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,2], [1,1,3]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
# Space group 182 (P 63 2 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
# Space group 183 (P 6 m m).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(183, 'P 6 m m', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[183] = sg
space_groups['P 6 m m'] = sg
# Space group 184 (P 6 c c).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(184, 'P 6 c c', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[184] = sg
space_groups['P 6 c c'] = sg
# Space group 185 (P 63 c m).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(185, 'P 63 c m', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[185] = sg
space_groups['P 63 c m'] = sg
# Space group 186 (P 63 m c).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(186, 'P 63 m c', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[186] = sg
space_groups['P 63 m c'] = sg
# Space group 187 (P -6 m 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
# Space group 188 (P -6 c 2).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
# Space group 189 (P -6 2 m).
# Each symmetry operation is a (rotation matrix, translation numerator,
# translation denominator) triple; the 9 rotation entries are reshaped to 3x3.
transformations = []
for rows, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rows)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
# Register under both the numeric and the Hermann-Mauguin key.
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
# Space group 190 ('P -6 2 c'): twelve symmetry operations, each stored as
# (3x3 rotation matrix, translation numerator, translation denominator).
transformations = []
for _rot, _num, _den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, 1, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([1, -1, 0, 0, -1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
# Register the group under both its number and its symbol string.
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
# Space group 191 ('P 6/m m m'): twenty-four symmetry operations, each stored
# as (3x3 rotation matrix, translation numerator, translation denominator).
# All translations are zero for this group.
transformations = []
for _rot in [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [1, -1, 0, 1, 0, 0, 0, 0, 1],
    [0, 1, 0, -1, 1, 0, 0, 0, 1],
    [0, -1, 0, 1, -1, 0, 0, 0, 1],
    [-1, 1, 0, -1, 0, 0, 0, 0, 1],
    [1, -1, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, -1, 1, 0, 0, 0, -1],
    [-1, 0, 0, 0, -1, 0, 0, 0, 1],
    [0, 1, 0, 1, 0, 0, 0, 0, -1],
    [0, -1, 0, -1, 0, 0, 0, 0, -1],
    [-1, 1, 0, 0, 1, 0, 0, 0, -1],
    [1, 0, 0, 1, -1, 0, 0, 0, -1],
    [-1, 0, 0, 0, -1, 0, 0, 0, -1],
    [-1, 1, 0, -1, 0, 0, 0, 0, -1],
    [0, -1, 0, 1, -1, 0, 0, 0, -1],
    [0, 1, 0, -1, 1, 0, 0, 0, -1],
    [1, -1, 0, 1, 0, 0, 0, 0, -1],
    [-1, 1, 0, 0, 1, 0, 0, 0, 1],
    [1, 0, 0, 1, -1, 0, 0, 0, 1],
    [1, 0, 0, 0, 1, 0, 0, 0, -1],
    [0, -1, 0, -1, 0, 0, 0, 0, 1],
    [0, 1, 0, 1, 0, 0, 0, 0, 1],
    [1, -1, 0, 0, -1, 0, 0, 0, 1],
    [-1, 0, 0, -1, 1, 0, 0, 0, 1],
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0, 0, 0])
    trans_den = N.array([1, 1, 1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
# Register the group under both its number and its symbol string.
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
# Space group 192 ('P 6/m c c'): twenty-four symmetry operations, each stored
# as (3x3 rotation matrix, translation numerator, translation denominator).
transformations = []
for _rot, _num, _den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 0, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 1, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([1, 0, 0, 1, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, 1, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, -1, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, 0, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 1, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, -1, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
# Register the group under both its number and its symbol string.
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
# Space group 193 ('P 63/m c m'): twenty-four symmetry operations, each stored
# as (3x3 rotation matrix, translation numerator, translation denominator).
transformations = []
for _rot, _num, _den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, 1, 0, -1, 1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 0, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, 0, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 1, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, -1, 0, 1, -1, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, 1, 0, -1, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, 0, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 1, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 0, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
# Register the group under both its number and its symbol string.
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
# Space group 194 ('P 63/m m c'): twenty-four symmetry operations, each stored
# as (3x3 rotation matrix, translation numerator, translation denominator).
transformations = []
for _rot, _num, _den in [
    ([1, 0, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 1, 0, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, 1, 0, -1, 1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, -1, 0, 1, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, 1], [0, 0, 1], [1, 1, 2]),
    ([0, 1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([0, -1, 0, -1, 0, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 1, 0, 0, 1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([1, 0, 0, 1, -1, 0, 0, 0, -1], [0, 0, 1], [1, 1, 2]),
    ([-1, 0, 0, 0, -1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, -1, 0, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, -1, 0, 1, -1, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, 1, 0, -1, 1, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([1, -1, 0, 1, 0, 0, 0, 0, -1], [0, 0, 0], [1, 1, 1]),
    ([-1, 1, 0, 0, 1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 1, -1, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([1, 0, 0, 0, 1, 0, 0, 0, -1], [0, 0, -1], [1, 1, 2]),
    ([0, -1, 0, -1, 0, 0, 0, 0, 1], [0, 0, 0], [1, 1, 1]),
    ([0, 1, 0, 1, 0, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([1, -1, 0, 0, -1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
    ([-1, 0, 0, -1, 1, 0, 0, 0, 1], [0, 0, -1], [1, 1, 2]),
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
# Register the group under both its number and its symbol string.
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
# Space group 195 ('P 2 3'): twelve symmetry operations, each stored as
# (3x3 rotation matrix, translation numerator, translation denominator).
# All translations are zero for this group.
transformations = []
for _rot in [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, 0, 1, 1, 0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0, 1, 1, 0, 0],
    [0, -1, 0, 0, 0, -1, 1, 0, 0],
    [0, 0, 1, -1, 0, 0, 0, -1, 0],
    [0, -1, 0, 0, 0, 1, -1, 0, 0],
    [0, 0, -1, -1, 0, 0, 0, 1, 0],
    [0, 0, -1, 1, 0, 0, 0, -1, 0],
    [0, 1, 0, 0, 0, -1, -1, 0, 0],
    [1, 0, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, 0, 1, 0, 0, 0, -1],
    [-1, 0, 0, 0, -1, 0, 0, 0, 1],
]:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0, 0, 0])
    trans_den = N.array([1, 1, 1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
# Register the group under both its number and its symbol string.
space_groups[195] = sg
space_groups['P 2 3'] = sg
# Space group 196 ('F 2 3'): forty-eight symmetry operations.  The original
# listing is the same twelve rotations repeated under four translations
# ([0,0,0]/[1,1,1], [0,1,1]/[1,2,2], [1,0,1]/[2,1,2], [1,1,0]/[2,2,1]),
# translation-major, so the nested loop below reproduces the exact order.
transformations = []
_point_ops = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, 0, 1, 1, 0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0, 1, 1, 0, 0],
    [0, -1, 0, 0, 0, -1, 1, 0, 0],
    [0, 0, 1, -1, 0, 0, 0, -1, 0],
    [0, -1, 0, 0, 0, 1, -1, 0, 0],
    [0, 0, -1, -1, 0, 0, 0, 1, 0],
    [0, 0, -1, 1, 0, 0, 0, -1, 0],
    [0, 1, 0, 0, 0, -1, -1, 0, 0],
    [1, 0, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, 0, 1, 0, 0, 0, -1],
    [-1, 0, 0, 0, -1, 0, 0, 0, 1],
]
for _num, _den in [
    ([0, 0, 0], [1, 1, 1]),
    ([0, 1, 1], [1, 2, 2]),
    ([1, 0, 1], [2, 1, 2]),
    ([1, 1, 0], [2, 2, 1]),
]:
    for _rot in _point_ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
# Register the group under both its number and its symbol string.
space_groups[196] = sg
space_groups['F 2 3'] = sg
# Space group 197 ('I 2 3'): twenty-four symmetry operations.  The original
# listing is the same twelve rotations repeated under two translations
# ([0,0,0]/[1,1,1] then [1,1,1]/[2,2,2]), translation-major, so the nested
# loop below reproduces the exact order.
transformations = []
_point_ops = [
    [1, 0, 0, 0, 1, 0, 0, 0, 1],
    [0, 0, 1, 1, 0, 0, 0, 1, 0],
    [0, 1, 0, 0, 0, 1, 1, 0, 0],
    [0, -1, 0, 0, 0, -1, 1, 0, 0],
    [0, 0, 1, -1, 0, 0, 0, -1, 0],
    [0, -1, 0, 0, 0, 1, -1, 0, 0],
    [0, 0, -1, -1, 0, 0, 0, 1, 0],
    [0, 0, -1, 1, 0, 0, 0, -1, 0],
    [0, 1, 0, 0, 0, -1, -1, 0, 0],
    [1, 0, 0, 0, -1, 0, 0, 0, -1],
    [-1, 0, 0, 0, 1, 0, 0, 0, -1],
    [-1, 0, 0, 0, -1, 0, 0, 0, 1],
]
for _num, _den in [
    ([0, 0, 0], [1, 1, 1]),
    ([1, 1, 1], [2, 2, 2]),
]:
    for _rot in _point_ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
# Register the group under both its number and its symbol string.
space_groups[197] = sg
space_groups['I 2 3'] = sg
# Space group 198 ('P 21 3'): twelve symmetry operations.  Each entry gives
# the nine rotation-matrix elements (row major), the translation numerators,
# and the translation denominators of one operation.
transformations = []
_ops_198 = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
]
for _r, _n, _d in _ops_198:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
# Space group 199 ('I 21 3'): 24 symmetry operations.  The second half of the
# table repeats the twelve rotations of the first half with the body-centring
# translation folded into the numerators/denominators, exactly as generated.
transformations = []
_ops_199 = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
]
for _r, _n, _d in _ops_199:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
# Space group 200 ('P m -3'): 24 point-group operations, every one of them
# with zero translation ([0,0,0]/[1,1,1]).  Only the rotation matrices vary.
transformations = []
_rots_200 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
for _r in _rots_200:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
# Space group 201 ('P n -3 :2', origin choice 2): 24 symmetry operations.
# Each entry is (rotation elements, translation numerators, denominators);
# negative numerators appear exactly as in the generated table.
transformations = []
_ops_201 = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,-1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,-1,-1], [1,2,2]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,1,0,0,0,-1,0], [-1,-1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,-1], [2,1,2]),
    ([0,-1,0,0,0,1,1,0,0], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
]
for _r, _n, _d in _ops_201:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_n)
    trans_den = N.array(_d)
    transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
# Space group 202 ('F m -3'): 96 symmetry operations, generated as the 24
# point-group rotations combined with the four face-centring translations
# (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).  The append order below
# (all 24 rotations per centring, centrings in the order listed) reproduces
# the expanded table exactly.
transformations = []
_rots_202 = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
_centrings_202 = [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]
for _n, _d in _centrings_202:
    for _r in _rots_202:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_n)
        trans_den = N.array(_d)
        transformations.append((rot, trans_num, trans_den))
# Register the group under both its number and its Hermann-Mauguin symbol.
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
# Space group 203 ('F d -3 :2'): all 96 symmetry operators, stored as
# (rotation matrix, translation numerator, translation denominator)
# triples.  The table below lists (flat 3x3 rotation, numerator,
# denominator) in exactly the order of the original unrolled statements.
transformations = []
for _rot_flat, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [4,4,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,4,4]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [4,1,4]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [4,4,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [4,1,4]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,-1,0], [4,4,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,-1,-1], [1,4,4]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,-1], [4,1,4]),
    ([0,0,1,1,0,0,0,-1,0], [-1,-1,0], [4,4,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,-1], [4,1,4]),
    ([0,-1,0,0,0,1,1,0,0], [0,-1,-1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [4,4,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,0,1,1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,1,1,0,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,3,1], [4,4,2]),
    ([0,0,1,-1,0,0,0,-1,0], [0,3,3], [1,4,4]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,3], [4,2,4]),
    ([0,0,-1,-1,0,0,0,1,0], [1,3,1], [4,4,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,3], [4,2,4]),
    ([0,1,0,0,0,-1,-1,0,0], [0,3,3], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [0,3,3], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,3,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,1,-1,0,0], [-1,1,1], [4,4,2]),
    ([0,0,-1,1,0,0,0,1,0], [0,1,1], [1,4,4]),
    ([0,1,0,0,0,-1,1,0,0], [-1,1,1], [4,2,4]),
    ([0,0,1,1,0,0,0,-1,0], [-1,1,1], [4,4,2]),
    ([0,0,1,-1,0,0,0,1,0], [-1,1,1], [4,2,4]),
    ([0,-1,0,0,0,1,1,0,0], [0,1,1], [1,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [0,1,1], [1,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [-1,1,1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [-1,1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,0,1,1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,1,1,0,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,-1,1,0,0], [3,1,1], [4,4,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,3], [2,4,4]),
    ([0,-1,0,0,0,1,-1,0,0], [3,0,3], [4,1,4]),
    ([0,0,-1,-1,0,0,0,1,0], [3,1,1], [4,4,2]),
    ([0,0,-1,1,0,0,0,-1,0], [3,0,3], [4,1,4]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,3], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,0,3], [4,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,1,1], [4,4,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,-1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,1,-1,0,0], [1,-1,1], [4,4,2]),
    ([0,0,-1,1,0,0,0,1,0], [1,-1,1], [2,4,4]),
    ([0,1,0,0,0,-1,1,0,0], [1,0,1], [4,1,4]),
    ([0,0,1,1,0,0,0,-1,0], [1,-1,1], [4,4,2]),
    ([0,0,1,-1,0,0,0,1,0], [1,0,1], [4,1,4]),
    ([0,-1,0,0,0,1,1,0,0], [1,-1,1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [1,-1,1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,0,1], [4,1,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,-1,1], [4,4,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,0,1,1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,1,1,0,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,-1,1,0,0], [3,3,0], [4,4,1]),
    ([0,0,1,-1,0,0,0,-1,0], [1,3,1], [2,4,4]),
    ([0,-1,0,0,0,1,-1,0,0], [3,1,1], [4,2,4]),
    ([0,0,-1,-1,0,0,0,1,0], [3,3,0], [4,4,1]),
    ([0,0,-1,1,0,0,0,-1,0], [3,1,1], [4,2,4]),
    ([0,1,0,0,0,-1,-1,0,0], [1,3,1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,3,1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,-1], [3,1,1], [4,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [3,3,0], [4,4,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,1,-1,0,0], [1,1,0], [4,4,1]),
    ([0,0,-1,1,0,0,0,1,0], [1,1,-1], [2,4,4]),
    ([0,1,0,0,0,-1,1,0,0], [1,1,-1], [4,2,4]),
    ([0,0,1,1,0,0,0,-1,0], [1,1,0], [4,4,1]),
    ([0,0,1,-1,0,0,0,1,0], [1,1,-1], [4,2,4]),
    ([0,-1,0,0,0,1,1,0,0], [1,1,-1], [2,4,4]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,-1], [2,4,4]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,-1], [4,2,4]),
    ([1,0,0,0,1,0,0,0,-1], [1,1,0], [4,4,1]),
]:
    rot = N.array(_rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(203, 'F d -3 :2', transformations)
space_groups[203] = sg
space_groups['F d -3 :2'] = sg
# Space group 204 ('I m -3'): 48 symmetry operators.  The original
# unrolled code lists the same 24 rotation matrices twice -- first with
# zero translation ([0,0,0]/[1,1,1]) and then shifted by [1,1,1]/[2,2,2]
# (presumably the I-centring vector (1/2,1/2,1/2) -- not verifiable from
# this file alone).  Building them with two nested loops reproduces the
# exact same triples in the exact same order.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,0,-1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,-1,-1,0,0],
    [0,1,0,0,0,1,-1,0,0],
    [0,0,-1,1,0,0,0,1,0],
    [0,1,0,0,0,-1,1,0,0],
    [0,0,1,1,0,0,0,-1,0],
    [0,0,1,-1,0,0,0,1,0],
    [0,-1,0,0,0,1,1,0,0],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _rot_flat in _rotations:
        rot = N.array(_rot_flat)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(204, 'I m -3', transformations)
space_groups[204] = sg
space_groups['I m -3'] = sg
# Space group 205 ('P a -3'): 24 symmetry operators, each with its own
# translation.  The table lists (flat 3x3 rotation, translation numerator,
# translation denominator) in the order of the original unrolled code.
transformations = []
for _rot_flat, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [-1,0,-1], [2,1,2]),
    ([0,0,-1,1,0,0,0,1,0], [-1,-1,0], [2,2,1]),
    ([0,1,0,0,0,-1,1,0,0], [0,-1,-1], [1,2,2]),
    ([0,0,1,1,0,0,0,-1,0], [-1,0,-1], [2,1,2]),
    ([0,0,1,-1,0,0,0,1,0], [0,-1,-1], [1,2,2]),
    ([0,-1,0,0,0,1,1,0,0], [-1,-1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,0,-1], [2,1,2]),
]:
    rot = N.array(_rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(205, 'P a -3', transformations)
space_groups[205] = sg
space_groups['P a -3'] = sg
# Start of the next space group's operator list (the SpaceGroup
# registration lies beyond this point in the file).  These 34 complete
# operators are appended in the same order as the unrolled original;
# subsequent statements continue appending to the same list.
transformations = []
for _rot_flat, _num, _den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,-1,0,0], [0,-1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,1,0], [0,0,-1], [1,1,2]),
    ([0,1,0,0,0,-1,1,0,0], [-1,0,0], [2,1,1]),
    ([0,0,1,1,0,0,0,-1,0], [0,-1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,1,0], [-1,0,0], [2,1,1]),
    ([0,-1,0,0,0,1,1,0,0], [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
]:
    rot = N.array(_rot_flat)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
# Register space group #206 (Hermann-Mauguin symbol 'I a -3') under both
# its IT number and its symbol, so lookups by either key return the same
# SpaceGroup instance.
sg = SpaceGroup(206, 'I a -3', transformations)
space_groups[206] = sg
space_groups['I a -3'] = sg
# Space group #207 (P 4 3 2): 24 point-group rotations, all with zero
# translation.  Each entry below is a row-major 3x3 rotation matrix; the
# translation is stored as an integer numerator/denominator pair.
transformations = []
_rotations_207 = [
    (1,0,0,0,1,0,0,0,1),
    (1,0,0,0,0,-1,0,1,0),
    (1,0,0,0,0,1,0,-1,0),
    (0,0,1,0,1,0,-1,0,0),
    (0,0,-1,0,1,0,1,0,0),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (0,0,1,1,0,0,0,1,0),
    (0,1,0,0,0,1,1,0,0),
    (0,-1,0,0,0,-1,1,0,0),
    (0,0,1,-1,0,0,0,-1,0),
    (0,-1,0,0,0,1,-1,0,0),
    (0,0,-1,-1,0,0,0,1,0),
    (0,0,-1,1,0,0,0,-1,0),
    (0,1,0,0,0,-1,-1,0,0),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (0,0,1,0,-1,0,1,0,0),
    (0,0,-1,0,-1,0,-1,0,0),
    (-1,0,0,0,0,1,0,1,0),
    (-1,0,0,0,0,-1,0,-1,0),
]
for _r in _rotations_207:
    rot = N.array(_r)
    rot.shape = (3, 3)
    # Pure rotation: translation numerator 0 over denominator 1.
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(207, 'P 4 3 2', transformations)
space_groups[207] = sg
space_groups['P 4 3 2'] = sg
# Space group #208 (P 42 3 2): the same 24 cubic rotations as P 4 3 2,
# but twelve of them carry a (1/2, 1/2, 1/2) screw/translation component.
# Each entry pairs a row-major 3x3 rotation with a flag marking whether
# the half-cell translation applies.
transformations = []
_ops_208 = [
    ((1,0,0,0,1,0,0,0,1), False),
    ((1,0,0,0,0,-1,0,1,0), True),
    ((1,0,0,0,0,1,0,-1,0), True),
    ((0,0,1,0,1,0,-1,0,0), True),
    ((0,0,-1,0,1,0,1,0,0), True),
    ((0,-1,0,1,0,0,0,0,1), True),
    ((0,1,0,-1,0,0,0,0,1), True),
    ((0,0,1,1,0,0,0,1,0), False),
    ((0,1,0,0,0,1,1,0,0), False),
    ((0,-1,0,0,0,-1,1,0,0), False),
    ((0,0,1,-1,0,0,0,-1,0), False),
    ((0,-1,0,0,0,1,-1,0,0), False),
    ((0,0,-1,-1,0,0,0,1,0), False),
    ((0,0,-1,1,0,0,0,-1,0), False),
    ((0,1,0,0,0,-1,-1,0,0), False),
    ((1,0,0,0,-1,0,0,0,-1), False),
    ((-1,0,0,0,1,0,0,0,-1), False),
    ((-1,0,0,0,-1,0,0,0,1), False),
    ((0,1,0,1,0,0,0,0,-1), True),
    ((0,-1,0,-1,0,0,0,0,-1), True),
    ((0,0,1,0,-1,0,1,0,0), True),
    ((0,0,-1,0,-1,0,-1,0,0), True),
    ((-1,0,0,0,0,1,0,1,0), True),
    ((-1,0,0,0,0,-1,0,-1,0), True),
]
for _r, _half in _ops_208:
    rot = N.array(_r)
    rot.shape = (3, 3)
    if _half:
        # Translation (1/2, 1/2, 1/2) stored as numerator/denominator.
        trans_num = N.array([1,1,1])
        trans_den = N.array([2,2,2])
    else:
        trans_num = N.array([0,0,0])
        trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(208, 'P 42 3 2', transformations)
space_groups[208] = sg
space_groups['P 42 3 2'] = sg
# Space group #209 (F 4 3 2): face-centred cubic.  The full operator list
# is the 24 point-group rotations repeated once per centering vector, in
# the order (0,0,0), (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0) — 96 operators
# in total, matching the original expanded listing.
transformations = []
_rotations_209 = [
    (1,0,0,0,1,0,0,0,1),
    (1,0,0,0,0,-1,0,1,0),
    (1,0,0,0,0,1,0,-1,0),
    (0,0,1,0,1,0,-1,0,0),
    (0,0,-1,0,1,0,1,0,0),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (0,0,1,1,0,0,0,1,0),
    (0,1,0,0,0,1,1,0,0),
    (0,-1,0,0,0,-1,1,0,0),
    (0,0,1,-1,0,0,0,-1,0),
    (0,-1,0,0,0,1,-1,0,0),
    (0,0,-1,-1,0,0,0,1,0),
    (0,0,-1,1,0,0,0,-1,0),
    (0,1,0,0,0,-1,-1,0,0),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (0,0,1,0,-1,0,1,0,0),
    (0,0,-1,0,-1,0,-1,0,0),
    (-1,0,0,0,0,1,0,1,0),
    (-1,0,0,0,0,-1,0,-1,0),
]
# Centering translations as (numerator, denominator) triples.
_centerings_209 = [
    ((0,0,0), (1,1,1)),
    ((0,1,1), (1,2,2)),
    ((1,0,1), (2,1,2)),
    ((1,1,0), (2,2,1)),
]
for _num, _den in _centerings_209:
    for _r in _rotations_209:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(209, 'F 4 3 2', transformations)
space_groups[209] = sg
space_groups['F 4 3 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
# ---- Space group 210, 'F 41 3 2' (continued from above) ----
# Finish the symmetry operation whose rotation and translation parts
# were constructed on the lines preceding this section.
transformations.append((rot, trans_num, trans_den))

# The 24 proper cubic rotation matrices (row-major, reshaped to 3x3
# below), in the canonical order used throughout this generated table.
_rots = [
    [1,0,0,0,1,0,0,0,1],    [1,0,0,0,0,-1,0,1,0],   [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],   [0,0,-1,0,1,0,1,0,0],   [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],   [0,0,1,1,0,0,0,1,0],    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],  [0,0,1,-1,0,0,0,-1,0],  [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],  [0,0,-1,1,0,0,0,-1,0],  [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],  [-1,0,0,0,1,0,0,0,-1],  [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],   [0,-1,0,-1,0,0,0,0,-1], [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0], [-1,0,0,0,0,1,0,1,0],   [-1,0,0,0,0,-1,0,-1,0],
]

def _coset(cent_num, cent_den, screw_num, screw_den):
    """Return (rotation, numerator, denominator) triples for one
    F-centring coset of F 41 3 2: rotations 2-7 and 19-24 carry the
    screw translation, all other rotations the pure centring one."""
    triples = []
    for i, r in enumerate(_rots):
        if 1 <= i <= 6 or i >= 18:
            triples.append((r, screw_num, screw_den))
        else:
            triples.append((r, cent_num, cent_den))
    return triples

# Remaining operations: rotations 10-24 of the (0,0,0) coset (the
# earlier ones were emitted above), then the three F-centring cosets
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0).
_remaining = (_coset([0,0,0], [1,1,1], [1,1,1], [4,4,4])[9:]
              + _coset([0,1,1], [1,2,2], [1,3,3], [4,4,4])
              + _coset([1,0,1], [2,1,2], [3,1,3], [4,4,4])
              + _coset([1,1,0], [2,2,1], [3,3,1], [4,4,4]))
for _r, _num, _den in _remaining:
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))

sg = SpaceGroup(210, 'F 41 3 2', transformations)
space_groups[210] = sg
space_groups['F 41 3 2'] = sg
# ---- Space group 211, 'I 4 3 2' ----
transformations = []

# The 24 proper cubic rotation matrices (row-major, reshaped to 3x3
# below), in the canonical order used throughout this generated table.
_rots = [
    [1,0,0,0,1,0,0,0,1],    [1,0,0,0,0,-1,0,1,0],   [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],   [0,0,-1,0,1,0,1,0,0],   [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],   [0,0,1,1,0,0,0,1,0],    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],  [0,0,1,-1,0,0,0,-1,0],  [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],  [0,0,-1,1,0,0,0,-1,0],  [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],  [-1,0,0,0,1,0,0,0,-1],  [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],   [0,-1,0,-1,0,0,0,0,-1], [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0], [-1,0,0,0,0,1,0,1,0],   [-1,0,0,0,0,-1,0,-1,0],
]

# Body-centred lattice: every rotation occurs once with zero translation
# and once shifted by the centring vector (1/2, 1/2, 1/2).
for _num, _den in ([0,0,0], [1,1,1]), ([1,1,1], [2,2,2]):
    for _r in _rots:
        rot = N.array(_r)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))

sg = SpaceGroup(211, 'I 4 3 2', transformations)
space_groups[211] = sg
space_groups['I 4 3 2'] = sg
# ---- Space group 212, 'P 43 3 2' ----
transformations = []

# The 24 proper cubic rotation matrices (row-major, reshaped to 3x3
# below), in the canonical order used throughout this generated table.
_rots = [
    [1,0,0,0,1,0,0,0,1],    [1,0,0,0,0,-1,0,1,0],   [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],   [0,0,-1,0,1,0,1,0,0],   [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],   [0,0,1,1,0,0,0,1,0],    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],  [0,0,1,-1,0,0,0,-1,0],  [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],  [0,0,-1,1,0,0,0,-1,0],  [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],  [-1,0,0,0,1,0,0,0,-1],  [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],   [0,-1,0,-1,0,0,0,0,-1], [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0], [-1,0,0,0,0,1,0,1,0],   [-1,0,0,0,0,-1,0,-1,0],
]

# (numerator, denominator) translation parts, one per rotation, in the
# same order as _rots.
_trans = [
    ([0,0,0], [1,1,1]), ([3,3,1], [4,4,4]), ([1,3,3], [4,4,4]),
    ([1,3,3], [4,4,4]), ([3,1,3], [4,4,4]), ([3,1,3], [4,4,4]),
    ([3,3,1], [4,4,4]), ([0,0,0], [1,1,1]), ([0,0,0], [1,1,1]),
    ([1,0,1], [2,1,2]), ([1,1,0], [2,2,1]), ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]), ([0,1,1], [1,2,2]), ([1,1,0], [2,2,1]),
    ([1,1,0], [2,2,1]), ([0,1,1], [1,2,2]), ([1,0,1], [2,1,2]),
    ([1,3,3], [4,4,4]), ([1,1,1], [4,4,4]), ([3,3,1], [4,4,4]),
    ([1,1,1], [4,4,4]), ([3,1,3], [4,4,4]), ([1,1,1], [4,4,4]),
]
for _r, (_num, _den) in zip(_rots, _trans):
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))

sg = SpaceGroup(212, 'P 43 3 2', transformations)
space_groups[212] = sg
space_groups['P 43 3 2'] = sg
# ---- Space group 213, 'P 41 3 2' ----
transformations = []

# The 24 proper cubic rotation matrices (row-major, reshaped to 3x3
# below), in the canonical order used throughout this generated table.
_rots = [
    [1,0,0,0,1,0,0,0,1],    [1,0,0,0,0,-1,0,1,0],   [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],   [0,0,-1,0,1,0,1,0,0],   [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],   [0,0,1,1,0,0,0,1,0],    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],  [0,0,1,-1,0,0,0,-1,0],  [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],  [0,0,-1,1,0,0,0,-1,0],  [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],  [-1,0,0,0,1,0,0,0,-1],  [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],   [0,-1,0,-1,0,0,0,0,-1], [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0], [-1,0,0,0,0,1,0,1,0],   [-1,0,0,0,0,-1,0,-1,0],
]

# (numerator, denominator) translation parts, one per rotation, in the
# same order as _rots.
_trans = [
    ([0,0,0], [1,1,1]), ([1,1,3], [4,4,4]), ([3,1,1], [4,4,4]),
    ([3,1,1], [4,4,4]), ([1,3,1], [4,4,4]), ([1,3,1], [4,4,4]),
    ([1,1,3], [4,4,4]), ([0,0,0], [1,1,1]), ([0,0,0], [1,1,1]),
    ([1,0,1], [2,1,2]), ([1,1,0], [2,2,1]), ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]), ([0,1,1], [1,2,2]), ([1,1,0], [2,2,1]),
    ([1,1,0], [2,2,1]), ([0,1,1], [1,2,2]), ([1,0,1], [2,1,2]),
    ([3,1,1], [4,4,4]), ([3,3,3], [4,4,4]), ([1,1,3], [4,4,4]),
    ([3,3,3], [4,4,4]), ([1,3,1], [4,4,4]), ([3,3,3], [4,4,4]),
]
for _r, (_num, _den) in zip(_rots, _trans):
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))

sg = SpaceGroup(213, 'P 41 3 2', transformations)
space_groups[213] = sg
space_groups['P 41 3 2'] = sg
# ---- Space group 214 (continued in the code that follows) ----
transformations = []

# The 24 proper cubic rotation matrices (row-major, reshaped to 3x3
# below), in the canonical order used throughout this generated table.
_rots = [
    [1,0,0,0,1,0,0,0,1],    [1,0,0,0,0,-1,0,1,0],   [1,0,0,0,0,1,0,-1,0],
    [0,0,1,0,1,0,-1,0,0],   [0,0,-1,0,1,0,1,0,0],   [0,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,0,0,0,0,1],   [0,0,1,1,0,0,0,1,0],    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],  [0,0,1,-1,0,0,0,-1,0],  [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],  [0,0,-1,1,0,0,0,-1,0],  [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],  [-1,0,0,0,1,0,0,0,-1],  [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],   [0,-1,0,-1,0,0,0,0,-1], [0,0,1,0,-1,0,1,0,0],
    [0,0,-1,0,-1,0,-1,0,0], [-1,0,0,0,0,1,0,1,0],   [-1,0,0,0,0,-1,0,-1,0],
]

# Translation parts for the primitive (0,0,0) coset, one per rotation,
# in the same order as _rots.
_trans = [
    ([0,0,0], [1,1,1]), ([1,1,3], [4,4,4]), ([1,3,3], [4,4,4]),
    ([1,3,3], [4,4,4]), ([1,3,1], [4,4,4]), ([1,3,1], [4,4,4]),
    ([1,1,3], [4,4,4]), ([0,0,0], [1,1,1]), ([0,0,0], [1,1,1]),
    ([0,1,0], [1,2,1]), ([0,0,1], [1,1,2]), ([1,0,0], [2,1,1]),
    ([0,1,0], [1,2,1]), ([1,0,0], [2,1,1]), ([0,0,1], [1,1,2]),
    ([0,0,1], [1,1,2]), ([1,0,0], [2,1,1]), ([0,1,0], [1,2,1]),
    ([1,3,3], [4,4,4]), ([1,1,1], [4,4,4]), ([1,1,3], [4,4,4]),
    ([1,1,1], [4,4,4]), ([1,3,1], [4,4,4]), ([1,1,1], [4,4,4]),
]
# First seven operations of the body-centred (+1/2,1/2,1/2) coset;
# zip() below pairs them with the first seven rotations and stops there.
_trans2 = [
    ([1,1,1], [2,2,2]), ([3,3,5], [4,4,4]), ([3,5,5], [4,4,4]),
    ([3,5,5], [4,4,4]), ([3,5,3], [4,4,4]), ([3,5,3], [4,4,4]),
    ([3,3,5], [4,4,4]),
]
for _r, (_num, _den) in zip(_rots + _rots, _trans + _trans2):
    rot = N.array(_r)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))

# Eighth operation of the body-centred coset: its rotation and
# translation are built here; the append happens in the code that
# follows this section, so these bindings must be left in place.
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(214, 'I 41 3 2', transformations)
space_groups[214] = sg
space_groups['I 41 3 2'] = sg
# Space group 215 (P -4 3 m).
# Each symmetry operation is stored as a triple
# (rotation matrix, translation numerators, translation denominators);
# all 24 point-group operations of -43m carry a zero translation here.
transformations = []
for elems in [
    (1,0,0, 0,1,0, 0,0,1),
    (-1,0,0, 0,0,1, 0,-1,0),
    (-1,0,0, 0,0,-1, 0,1,0),
    (0,0,-1, 0,-1,0, 1,0,0),
    (0,0,1, 0,-1,0, -1,0,0),
    (0,1,0, -1,0,0, 0,0,-1),
    (0,-1,0, 1,0,0, 0,0,-1),
    (0,0,1, 1,0,0, 0,1,0),
    (0,1,0, 0,0,1, 1,0,0),
    (0,-1,0, 0,0,-1, 1,0,0),
    (0,0,1, -1,0,0, 0,-1,0),
    (0,-1,0, 0,0,1, -1,0,0),
    (0,0,-1, -1,0,0, 0,1,0),
    (0,0,-1, 1,0,0, 0,-1,0),
    (0,1,0, 0,0,-1, -1,0,0),
    (1,0,0, 0,-1,0, 0,0,-1),
    (-1,0,0, 0,1,0, 0,0,-1),
    (-1,0,0, 0,-1,0, 0,0,1),
    (0,-1,0, -1,0,0, 0,0,1),
    (0,1,0, 1,0,0, 0,0,1),
    (0,0,-1, 0,1,0, -1,0,0),
    (0,0,1, 0,1,0, 1,0,0),
    (1,0,0, 0,0,-1, 0,-1,0),
    (1,0,0, 0,0,1, 0,1,0),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(215, 'P -4 3 m', transformations)
space_groups[215] = sg
space_groups['P -4 3 m'] = sg
# Space group 216 (F -4 3 m).
# The 24 point-group operations of -43m, repeated under the four
# F-centring translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0).
# Translations are stored as separate numerator and denominator arrays.
# Ordering matches the expanded form: all 24 operations for one centring
# before moving to the next.
transformations = []
for num, den in [
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
]:
    for elems in [
        (1,0,0, 0,1,0, 0,0,1),
        (-1,0,0, 0,0,1, 0,-1,0),
        (-1,0,0, 0,0,-1, 0,1,0),
        (0,0,-1, 0,-1,0, 1,0,0),
        (0,0,1, 0,-1,0, -1,0,0),
        (0,1,0, -1,0,0, 0,0,-1),
        (0,-1,0, 1,0,0, 0,0,-1),
        (0,0,1, 1,0,0, 0,1,0),
        (0,1,0, 0,0,1, 1,0,0),
        (0,-1,0, 0,0,-1, 1,0,0),
        (0,0,1, -1,0,0, 0,-1,0),
        (0,-1,0, 0,0,1, -1,0,0),
        (0,0,-1, -1,0,0, 0,1,0),
        (0,0,-1, 1,0,0, 0,-1,0),
        (0,1,0, 0,0,-1, -1,0,0),
        (1,0,0, 0,-1,0, 0,0,-1),
        (-1,0,0, 0,1,0, 0,0,-1),
        (-1,0,0, 0,-1,0, 0,0,1),
        (0,-1,0, -1,0,0, 0,0,1),
        (0,1,0, 1,0,0, 0,0,1),
        (0,0,-1, 0,1,0, -1,0,0),
        (0,0,1, 0,1,0, 1,0,0),
        (1,0,0, 0,0,-1, 0,-1,0),
        (1,0,0, 0,0,1, 0,1,0),
    ]:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(216, 'F -4 3 m', transformations)
space_groups[216] = sg
space_groups['F -4 3 m'] = sg
# Space group 217 (I -4 3 m).
# The 24 point-group operations of -43m, repeated under the two
# I-centring translations (0,0,0) and (1/2,1/2,1/2). Translations are
# stored as separate numerator and denominator arrays. Ordering matches
# the expanded form: all 24 operations for one centring before the next.
transformations = []
for num, den in [
    ([0,0,0], [1,1,1]),
    ([1,1,1], [2,2,2]),
]:
    for elems in [
        (1,0,0, 0,1,0, 0,0,1),
        (-1,0,0, 0,0,1, 0,-1,0),
        (-1,0,0, 0,0,-1, 0,1,0),
        (0,0,-1, 0,-1,0, 1,0,0),
        (0,0,1, 0,-1,0, -1,0,0),
        (0,1,0, -1,0,0, 0,0,-1),
        (0,-1,0, 1,0,0, 0,0,-1),
        (0,0,1, 1,0,0, 0,1,0),
        (0,1,0, 0,0,1, 1,0,0),
        (0,-1,0, 0,0,-1, 1,0,0),
        (0,0,1, -1,0,0, 0,-1,0),
        (0,-1,0, 0,0,1, -1,0,0),
        (0,0,-1, -1,0,0, 0,1,0),
        (0,0,-1, 1,0,0, 0,-1,0),
        (0,1,0, 0,0,-1, -1,0,0),
        (1,0,0, 0,-1,0, 0,0,-1),
        (-1,0,0, 0,1,0, 0,0,-1),
        (-1,0,0, 0,-1,0, 0,0,1),
        (0,-1,0, -1,0,0, 0,0,1),
        (0,1,0, 1,0,0, 0,0,1),
        (0,0,-1, 0,1,0, -1,0,0),
        (0,0,1, 0,1,0, 1,0,0),
        (1,0,0, 0,0,-1, 0,-1,0),
        (1,0,0, 0,0,1, 0,1,0),
    ]:
        rot = N.array(elems)
        rot.shape = (3, 3)
        trans_num = N.array(num)
        trans_den = N.array(den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(217, 'I -4 3 m', transformations)
space_groups[217] = sg
space_groups['I -4 3 m'] = sg
# Space group 218 (P -4 3 n).
# Each entry below is one symmetry operation, given as
# (rotation matrix entries in row-major order,
#  translation numerators, translation denominators);
# the loop converts them into the (rot, trans_num, trans_den)
# triples of N arrays that SpaceGroup expects.
transformations = []
_sg_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,1,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,-1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,1,0,1,0,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,1,0,1,0], [1,1,1], [2,2,2]),
]
for _rot_elems, _num, _den in _sg_ops:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(218, 'P -4 3 n', transformations)
space_groups[218] = sg
space_groups['P -4 3 n'] = sg
# Space group 219 (F -4 3 c): 96 symmetry operations (24 point-group
# operations combined with the four F-centring translations).
# Each entry is (rotation matrix entries in row-major order,
# translation numerators, translation denominators); the loop turns
# them into the (rot, trans_num, trans_den) triples of N arrays that
# SpaceGroup expects.
transformations = []
_sg_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,0,0], [2,1,1]),
    ([-1,0,0,0,0,-1,0,1,0], [1,0,0], [2,1,1]),
    ([0,0,-1,0,-1,0,1,0,0], [1,0,0], [2,1,1]),
    ([0,0,1,0,-1,0,-1,0,0], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [1,0,0], [2,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,0,0], [1,1,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,0,0], [1,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,0,-1,1,0,0,0,-1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,0,-1,0,1,0,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,1,0,1,0,1,0,0], [1,0,0], [2,1,1]),
    ([1,0,0,0,0,-1,0,-1,0], [1,0,0], [2,1,1]),
    ([1,0,0,0,0,1,0,1,0], [1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,-1,0,1,0], [1,1,1], [2,2,2]),
    ([0,0,-1,0,-1,0,1,0,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,-1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,1,1,0,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,1], [1,2,2]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,1], [1,2,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,1], [2,2,2]),
    ([0,0,1,0,1,0,1,0,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,-1,0,-1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,0,1,0,1,0], [1,1,1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,0,1,0,-1,0], [1,0,1], [1,1,2]),
    ([-1,0,0,0,0,-1,0,1,0], [1,0,1], [1,1,2]),
    ([0,0,-1,0,-1,0,1,0,0], [1,0,1], [1,1,2]),
    ([0,0,1,0,-1,0,-1,0,0], [1,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,-1], [1,0,1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [1,0,1], [1,1,2]),
    ([0,0,1,1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,1,1,0,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,0,1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [1,0,1], [1,1,2]),
    ([0,0,-1,0,1,0,-1,0,0], [1,0,1], [1,1,2]),
    ([0,0,1,0,1,0,1,0,0], [1,0,1], [1,1,2]),
    ([1,0,0,0,0,-1,0,-1,0], [1,0,1], [1,1,2]),
    ([1,0,0,0,0,1,0,1,0], [1,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,0], [1,2,1]),
    ([-1,0,0,0,0,-1,0,1,0], [1,1,0], [1,2,1]),
    ([0,0,-1,0,-1,0,1,0,0], [1,1,0], [1,2,1]),
    ([0,0,1,0,-1,0,-1,0,0], [1,1,0], [1,2,1]),
    ([0,1,0,-1,0,0,0,0,-1], [1,1,0], [1,2,1]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,0], [1,2,1]),
    ([0,0,1,1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,1,1,0,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,0], [2,2,1]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [1,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [1,2,1]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,0], [1,2,1]),
    ([0,0,1,0,1,0,1,0,0], [1,1,0], [1,2,1]),
    ([1,0,0,0,0,-1,0,-1,0], [1,1,0], [1,2,1]),
    ([1,0,0,0,0,1,0,1,0], [1,1,0], [1,2,1]),
]
for _rot_elems, _num, _den in _sg_ops:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(219, 'F -4 3 c', transformations)
space_groups[219] = sg
space_groups['F -4 3 c'] = sg
# Space group 220 (I -4 3 d): 48 symmetry operations (24 point-group
# operations combined with the two I-centring translations).
# Each entry is (rotation matrix entries in row-major order,
# translation numerators, translation denominators); the loop turns
# them into the (rot, trans_num, trans_den) triples of N arrays that
# SpaceGroup expects.
transformations = []
_sg_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,0,1,0,-1,0], [1,1,3], [4,4,4]),
    ([-1,0,0,0,0,-1,0,1,0], [1,3,3], [4,4,4]),
    ([0,0,-1,0,-1,0,1,0,0], [1,3,3], [4,4,4]),
    ([0,0,1,0,-1,0,-1,0,0], [1,3,1], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,-1], [1,3,1], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [1,1,3], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,3,3], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [4,4,4]),
    ([0,0,-1,0,1,0,-1,0,0], [1,1,3], [4,4,4]),
    ([0,0,1,0,1,0,1,0,0], [1,1,1], [4,4,4]),
    ([1,0,0,0,0,-1,0,-1,0], [1,3,1], [4,4,4]),
    ([1,0,0,0,0,1,0,1,0], [1,1,1], [4,4,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,0,1,0,-1,0], [3,3,5], [4,4,4]),
    ([-1,0,0,0,0,-1,0,1,0], [3,5,5], [4,4,4]),
    ([0,0,-1,0,-1,0,1,0,0], [3,5,5], [4,4,4]),
    ([0,0,1,0,-1,0,-1,0,0], [3,5,3], [4,4,4]),
    ([0,1,0,-1,0,0,0,0,-1], [3,5,3], [4,4,4]),
    ([0,-1,0,1,0,0,0,0,-1], [3,3,5], [4,4,4]),
    ([0,0,1,1,0,0,0,1,0], [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [3,5,5], [4,4,4]),
    ([0,1,0,1,0,0,0,0,1], [3,3,3], [4,4,4]),
    ([0,0,-1,0,1,0,-1,0,0], [3,3,5], [4,4,4]),
    ([0,0,1,0,1,0,1,0,0], [3,3,3], [4,4,4]),
    ([1,0,0,0,0,-1,0,-1,0], [3,5,3], [4,4,4]),
    ([1,0,0,0,0,1,0,1,0], [3,3,3], [4,4,4]),
]
for _rot_elems, _num, _den in _sg_ops:
    rot = N.array(_rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(220, 'I -4 3 d', transformations)
space_groups[220] = sg
space_groups['I -4 3 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,1,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,0,-1,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,-1,0,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,-1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,0,1,0,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,0,1,0,1,0,0])
rot.shape = (3, 3)
trans_num = | N.array([0,0,0]) | numpy.array |
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
from init_state import init_state
from quadEOM import quadEOM
from globals import flag, hover_t, hover_z, hover_x, hover_y
from globals import t, s
from globals import systemParameters
from globals import fort_t, fort_x, fort_y, fort_z, backt_t, cruise_z, cruise_x, cruise_y, cruise_t, backt_x, backt_y, backt_z, max_time
from time_traj_land import time_traj_land
from time_traj_backtrans import time_traj_backtrans
from time_traj_cruise import time_traj_cruise
from time_traj_fortrans import time_traj_fortrans
from time_traj_hover import time_traj_hover
from utils import stateToQd
end=-1
def sim_3d(trajhandle, controlhandle):
max_time = 1;
#controlhandle =Controller()
# parameters for simulation
BQ = systemParameters()
# *********************** INITIAL CONDITIONS ***********************
print('Setting initial conditions...')
tstep = 0.01 # this determines the time step at which the solution is given %0.02
cstep = 0.01 # image capture time interval 0.06
max_iter = max_time/cstep # max iteration
#max_iter = 1
nstep = int(cstep/tstep)
time = 0 # current time
# Get start position
# des_start = trajhandle(0, []);
# x0 = init_state(des_start.pos,des_start.vel,des_start.rot,des_start.omega)
store_shape = int(max_iter*nstep)
xtraj = | np.zeros((store_shape, 12)) | numpy.zeros |
import numpy as np
import matplotlib.pyplot as plt
#import pymc3 as mcmc
#import emcee
from scipy.optimize import minimize
import h5py as h5
import sys, os
from parser import parse_input
import argparse
from matplotlib.colors import Normalize
from matplotlib.cm import get_cmap
from matplotlib import colorbar
from particle_spectra import default_values
from particle_spectra import default_turbulence_values
from particle_spectra import create_spectra
#from comp_spectra import find_min
#from comp_spectra import find_max
def model_f2(theta, x):
    """Cutoff spectral model K * exp(-(x / gamcool)**(3 - psi)).

    The cutoff scale ``gamcool`` is derived from fixed fiducial constants
    (gam0, gamth, C — presumably characteristic Lorentz factors of the
    underlying plasma model; confirm against the source paper) together
    with the fitted parameters.

    Parameters
    ----------
    theta : sequence of 3 floats
        (psi, zeta, K) — cutoff shape exponent, normalization factor,
        and overall amplitude.
    x : array_like
        Abscissa values at which the model is evaluated.

    Returns
    -------
    ndarray
        Model values, same shape as ``x``.
    """
    psi, zeta, K = theta
    gam0 = 200.0   # fiducial scale constant
    gamth = 1.1    # fiducial scale constant
    C = 1.0
    A = C / gamth
    exponent = 3.0 - psi
    # Closed-form cutoff scale implied by the fiducial constants.
    gamcool = ((exponent / A) * zeta * gam0 ** (2.0 - psi)) ** (1.0 / exponent)
    return K * np.exp(-((x / gamcool) ** exponent))
def find_min(xx, th):
    """Return the index of the first element of ``xx`` that is >= ``th``.

    Returns 0 when no element reaches the threshold (including for an
    empty sequence) — callers cannot distinguish "first element matches"
    from "nothing matches".
    """
    for idx, val in enumerate(xx):
        if val >= th:
            return idx
    return 0
#def model_f(theta, x):
# psi, gamcool, K = theta
# return K*np.exp(-(x/gamcool)**(3.0-psi))
def model_f(theta, x):
    """Exponential-cutoff model K * exp(-(x / gamcool)**(3 - psi)).

    Parameters
    ----------
    theta : sequence of 4 floats
        (psi, gamcool, K, p).  The slope ``p`` is currently unused: the
        prefactor is the constant ``K`` (earlier variants multiplied by a
        power law in ``x``).
    x : array_like
        Abscissa values at which the model is evaluated.

    Returns
    -------
    ndarray
        Model values, same shape as ``x``.
    """
    psi, gamcool, K, p = theta
    amplitude = K  # constant prefactor; power-law term intentionally disabled
    return amplitude * np.exp(-((x / gamcool) ** (3.0 - psi)))
def log_likelihood(theta, x, y):
model = model_f(theta, x)
nw = len(x)
sigma2 = np.ones(nw)
#sigma2 = y
return -0.5*np.sum((y-model)**2/sigma2 + | np.log(sigma2) | numpy.log |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 25 16:40:54 2019
@author: self-driver
"""
import numpy as np
from matplotlib import pyplot as plt
import Bezier
def bezier_n_curve(points, n, nTimes=1000):
#print("points is : \n",points)
#print(" n is : ",n)
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
print("points is : \n",points)
nPoints = len(points)
count = 0
if((nPoints - 1)%n):
count = n - (nPoints - 1)%n
print('count is : ',count)
# insert Point
'''
while(count):
points_insert = points[-1]+(points[-1]-points[-2])*0.1*count
points = np.insert(points,len(points),values=points_insert, axis=0)
count = count - 1
'''
result = | np.transpose(points) | numpy.transpose |
#!/usr/bin/env python
"""Provides class Simulation that initialises all the elements of the controllers and structures the execution of the
different components.
Simulation initialises all the components of the controller, namely the System, the Incremental Model, the Actor and the
Critic, as well as building the required Neural Networks. It counts with a method that executes in the required order
each of the controller elements during a complete iteration.
"""
import numpy as np
import matplotlib as mpl
mpl.use('TkAgg') # or can use 'TkAgg', whatever you have/prefer
"----------------------------------------------------------------------------------------------------------------------"
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2020 <NAME>"
__credits__ = []
__license__ = "MIT"
__version__ = "2.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
"----------------------------------------------------------------------------------------------------------------------"
class Simulation:
def __init__(self, iterations, selected_inputs, selected_states, selected_outputs, number_time_steps,
initial_states, reference_signals, actor, critic, system, incremental_model,
discretisation_time=0.5, tracking_states=['alpha']):
# Attributes regarding the simulation
self.iterations = iterations
self.number_time_steps = number_time_steps
self.time_step = 0
self.discretisation_time = discretisation_time
self.time = list(np.arange(0, self.number_time_steps * self.discretisation_time, self.discretisation_time))
self.iteration = 0
# Attributes regarding the system
self.selected_inputs = selected_inputs
self.selected_states = selected_states
self.selected_outputs = selected_outputs
self.initial_states = initial_states
self.tracking_states = tracking_states
self.indices_tracking_states = [self.selected_states.index(self.tracking_states[i])
for i in range(len(self.tracking_states))]
self.reference_signals = reference_signals
# Initialise all the elements of the simulation
self.actor = actor
self.critic = critic
self.system = system
self.incremental_model = incremental_model
# Cyclic parameters
self.xt = self.initial_states
self.xt_track = np.reshape(self.xt[self.indices_tracking_states, self.time_step], [-1, 1])
self.xt_ref = | np.reshape(self.reference_signals[:, self.time_step], [-1, 1]) | numpy.reshape |
from collections.abc import Sequence
import random
import cv2
import torch
import numpy as np
def set_all_randomness(seed, set_for_cuda=True):
"""Sets the random seed for numpy, pytorch, python.random
"""
random.seed(seed)
| np.random.seed(seed) | numpy.random.seed |
"""TNQMetro: Tensor-network based package for efficient quantum metrology computations."""
# Table of Contents
#
# 1 Functions for finite size systems......................................29
# 1.1 High level functions...............................................37
# 1.2 Low level functions...............................................257
# 1.2.1 Problems with exact derivative.............................1207
# 1.2.2 Problems with discrete approximation of the derivative.....2411
# 2 Functions for infinite size systems..................................3808
# 2.1 High level functions.............................................3816
# 2.2 Low level functions..............................................4075
# 3 Auxiliary functions..................................................5048
import itertools
import math
import warnings
import numpy as np
from ncon import ncon
########################################
# #
# #
# 1 Functions for finite size systems. #
# #
# #
########################################
#############################
# #
# 1.1 High level functions. #
# #
#############################
def fin(N, so_before_list, h, so_after_list, BC='O', L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
    """
    Optimize the QFI over operator L (in MPO representation) and wave function
    psi0 (in MPS representation) for a finite size system, checking
    convergence in both bond dimensions.

    The dynamics is a translationally invariant quantum channel built from
    layers of quantum operations: the local superoperators in
    ``so_before_list`` act (in order) before the unitary parameter encoding
    and those in ``so_after_list`` after it.  The encoding acts exactly once,
    is unitary, and is specified through its generator ``h``, which has to be
    diagonal in the computational basis (i.e. all local superoperators are
    expressed in the eigenbasis of ``h``).

    Parameters:
        N: integer
            Number of sites in the chain of tensors (usually number of particles).
        so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k))
            Local superoperators (in order) acting before the unitary
            parameter encoding; k is the number of sites each acts on.
        h: ndarray of a shape (d,d)
            Generator of the unitary parameter encoding; d is the dimension of
            the local Hilbert space (physical index).  Has to be diagonal in
            the computational basis.
        so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k))
            Local superoperators (in order) acting after the unitary
            parameter encoding.
        BC: 'O' or 'P', optional
            Boundary conditions, 'O' for OBC, 'P' for PBC.
        L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
            Initial MPO for L.
        psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
            Initial MPS for psi0.
        imprecision: float, optional
            Expected relative imprecision of the end results.
        D_L_max: integer, optional
            Maximal value of D_L (bond dimension of the MPO representing L).
        D_L_max_forced: bool, optional
            True if D_L_max has to be reached, otherwise False.
        L_herm: bool, optional
            True if the Hermitian gauge has to be imposed on the MPO
            representing L, otherwise False.
        D_psi0_max: integer, optional
            Maximal value of D_psi0 (bond dimension of the MPS representing psi0).
        D_psi0_max_forced: bool, optional
            True if D_psi0_max has to be reached, otherwise False.

    Returns:
        result: float
            Optimal value of the figure of merit.
        result_m: ndarray
            Figure of merit as a function of the bond dimensions of L [rows]
            and psi0 [columns].
        L: list of length N of ndarrays (OBC) or ndarray (PBC)
            Optimal L in MPO representation.
        psi0: list of length N of ndarrays (OBC) or ndarray (PBC)
            Optimal psi0 in MPS representation.
    """
    # Local superoperators are assumed to be expressed in the eigenbasis of h,
    # so h itself must be diagonal; warn (but proceed) otherwise.
    if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h have to be diagonal in computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.')
    d = np.shape(h)[0]
    # Build the full channel and its parameter derivative as MPO superoperators,
    # then delegate the actual optimization to the generic routine.
    ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
    ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
    return fin_gen(N, d, BC, ch, ch2, None, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
def fin_gen(N, d, BC, ch, ch2, epsilon=None, L_ini=None, psi0_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True, D_psi0_max=100, D_psi0_max_forced=False):
    """
    Optimize the figure of merit (usually interpreted as the QFI) over
    operator L (in MPO representation) and wave function psi0 (in MPS
    representation), checking convergence when increasing their bond
    dimensions.  Function for finite size systems.

    The dynamics is specified directly through a quantum channel ``ch`` and a
    second object ``ch2``, both given as superoperators in MPO representation.
    The meaning of ``ch2`` depends on ``epsilon``:
      - ``epsilon is None`` (default): ``ch2`` is the derivative of the
        channel with respect to the estimated parameter;
      - otherwise: ``ch2`` is the channel evaluated at the estimated
        parameter shifted by ``epsilon`` relative to ``ch``.
    There are no constraints on the structure of the channel, but the
    complexity of the calculation grows with the channel's bond dimension.

    Parameters:
        N: integer
            Number of sites in the chain of tensors (usually number of particles).
        d: integer
            Dimension of local Hilbert space (dimension of physical index).
        BC: 'O' or 'P'
            Boundary conditions, 'O' for OBC, 'P' for PBC.
        ch: list of length N of ndarrays of a shape (Dl_ch,Dr_ch,d**2,d**2) for OBC or ndarray of a shape (D_ch,D_ch,d**2,d**2,N) for PBC
            Quantum channel as a superoperator in MPO representation.
        ch2: same layout as ch
            Derivative of the channel (epsilon is None) or the channel at the
            shifted parameter value (epsilon given); see above.
        epsilon: float, optional
            Separation between the estimated parameter values encoded in ch
            and ch2, if the finite-difference variant is used.
        L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
            Initial MPO for L.
        psi0_ini: list of length N of ndarrays of a shape (Dl_psi0,Dr_psi0,d) for OBC or ndarray of a shape (D_psi0,D_psi0,d,N) for PBC, optional
            Initial MPS for psi0.
        imprecision: float, optional
            Expected relative imprecision of the end results.
        D_L_max: integer, optional
            Maximal value of D_L (bond dimension of the MPO representing L).
        D_L_max_forced: bool, optional
            True if D_L_max has to be reached, otherwise False.
        L_herm: bool, optional
            True if the Hermitian gauge has to be imposed on the MPO
            representing L, otherwise False.
        D_psi0_max: integer, optional
            Maximal value of D_psi0 (bond dimension of the MPS representing psi0).
        D_psi0_max_forced: bool, optional
            True if D_psi0_max has to be reached, otherwise False.

    Returns:
        result: float
            Optimal value of the figure of merit.
        result_m: ndarray
            Figure of merit as a function of the bond dimensions of L [rows]
            and psi0 [columns].
        L: list of length N of ndarrays (OBC) or ndarray (PBC)
            Optimal L in MPO representation.
        psi0: list of length N of ndarrays (OBC) or ndarray (PBC)
            Optimal psi0 in MPS representation.
    """
    if epsilon is None:
        # Exact-derivative variant: ch2 holds d(ch)/d(parameter).
        return fin_FoM_FoMD_optbd(N, d, BC, ch, ch2, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
    # Finite-difference variant: ch2 holds the channel at parameter + epsilon.
    return fin2_FoM_FoMD_optbd(N, d, BC, ch, ch2, epsilon, L_ini, psi0_ini, imprecision, D_L_max, D_L_max_forced, L_herm, D_psi0_max, D_psi0_max_forced)
def fin_state(N, so_before_list, h, so_after_list, rho0, BC='O', L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
    """
    Optimize the QFI over operator L (in MPO representation), checking
    convergence when increasing its bond dimension.  Function for finite size
    systems with a fixed initial state of the system.

    The dynamics is a translationally invariant quantum channel built from
    layers of quantum operations: the local superoperators in
    ``so_before_list`` act (in order) before the unitary parameter encoding
    and those in ``so_after_list`` after it.  The encoding acts exactly once,
    is unitary, and is specified through its generator ``h``, which has to be
    diagonal in the computational basis (i.e. all local superoperators are
    expressed in the eigenbasis of ``h``).

    Parameters:
        N: integer
            Number of sites in the chain of tensors (usually number of particles).
        so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k))
            Local superoperators (in order) acting before the unitary
            parameter encoding; k is the number of sites each acts on.
        h: ndarray of a shape (d,d)
            Generator of the unitary parameter encoding; d is the dimension of
            the local Hilbert space (physical index).  Has to be diagonal in
            the computational basis.
        so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k))
            Local superoperators (in order) acting after the unitary
            parameter encoding.
        rho0: list of length N of ndarrays of a shape (Dl_rho0,Dr_rho0,d,d) for OBC or ndarray of a shape (D_rho0,D_rho0,d,d,N) for PBC
            Density matrix describing the initial state of the system in MPO
            representation.
        BC: 'O' or 'P', optional
            Boundary conditions, 'O' for OBC, 'P' for PBC.
        L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
            Initial MPO for L.
        imprecision: float, optional
            Expected relative imprecision of the end results.
        D_L_max: integer, optional
            Maximal value of D_L (bond dimension of the MPO representing L).
        D_L_max_forced: bool, optional
            True if D_L_max has to be reached, otherwise False.
        L_herm: bool, optional
            True if the Hermitian gauge has to be imposed on the MPO
            representing L, otherwise False.

    Returns:
        result: float
            Optimal value of the figure of merit.
        result_v: ndarray
            Figure of merit as a function of the bond dimension of L.
        L: list of length N of ndarrays (OBC) or ndarray (PBC)
            Optimal L in MPO representation.
    """
    # Local superoperators are assumed to be expressed in the eigenbasis of h,
    # so h itself must be diagonal; warn (but proceed) otherwise.
    if np.linalg.norm(h - np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h have to be diagonal in computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.')
    d = np.shape(h)[0]
    # Build the channel and its parameter derivative, push the fixed initial
    # state through both, and optimize only over L on the resulting outputs.
    ch = fin_create_channel(N, d, BC, so_before_list + so_after_list)
    ch2 = fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list)
    rho = channel_acting_on_operator(ch, rho0)
    rho2 = channel_acting_on_operator(ch2, rho0)
    return fin_state_gen(N, d, BC, rho, rho2, None, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
def fin_state_gen(N, d, BC, rho, rho2, epsilon=None, L_ini=None, imprecision=10**-2, D_L_max=100, D_L_max_forced=False, L_herm=True):
    """
    Optimize the figure of merit (usually interpreted as the QFI) over
    operator L (in MPO representation), checking convergence when increasing
    its bond dimension.  Function for finite size systems with a fixed state
    of the system.

    The state is specified directly through the output density matrix ``rho``
    and a second object ``rho2``, both in MPO representation.  The meaning of
    ``rho2`` depends on ``epsilon``:
      - ``epsilon is None`` (default): ``rho2`` is the derivative of the
        output density matrix with respect to the estimated parameter;
      - otherwise: ``rho2`` is the output density matrix at the estimated
        parameter shifted by ``epsilon`` relative to ``rho``.

    Parameters:
        N: integer
            Number of sites in the chain of tensors (usually number of particles).
        d: integer
            Dimension of local Hilbert space (dimension of physical index).
        BC: 'O' or 'P'
            Boundary conditions, 'O' for OBC, 'P' for PBC.
        rho: list of length N of ndarrays of a shape (Dl_rho,Dr_rho,d,d) for OBC or ndarray of a shape (D_rho,D_rho,d,d,N) for PBC
            Density matrix at the output of the quantum channel in MPO
            representation.
        rho2: same layout as rho
            Derivative of the output density matrix (epsilon is None) or the
            output density matrix at the shifted parameter value (epsilon
            given); see above.
        epsilon: float, optional
            Separation between the estimated parameter values encoded in rho
            and rho2, if the finite-difference variant is used.
        L_ini: list of length N of ndarrays of a shape (Dl_L,Dr_L,d,d) for OBC or ndarray of a shape (D_L,D_L,d,d,N) for PBC, optional
            Initial MPO for L.
        imprecision: float, optional
            Expected relative imprecision of the end results.
        D_L_max: integer, optional
            Maximal value of D_L (bond dimension of the MPO representing L).
        D_L_max_forced: bool, optional
            True if D_L_max has to be reached, otherwise False.
        L_herm: bool, optional
            True if the Hermitian gauge has to be imposed on the MPO
            representing L, otherwise False.

    Returns:
        result: float
            Optimal value of the figure of merit.
        result_v: ndarray
            Figure of merit as a function of the bond dimension of L.
        L: list of length N of ndarrays (OBC) or ndarray (PBC)
            Optimal L in MPO representation.
    """
    if epsilon is None:
        # Exact-derivative variant: rho2 holds d(rho)/d(parameter).
        return fin_FoM_optbd(N, d, BC, rho, rho2, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
    # Finite-difference variant: rho2 holds rho at parameter + epsilon.
    return fin2_FoM_optbd(N, d, BC, rho, rho2, epsilon, L_ini, imprecision, D_L_max, D_L_max_forced, L_herm)
############################
# #
# 1.2 Low level functions. #
# #
############################
def fin_create_channel(N, d, BC, so_list, tol=10**-10):
    """
    Creates MPO for a superoperator describing translationally invariant quantum channel from list of local superoperators. Function for finite size systems.
    For OBC, tensor-network length N has to be at least 2k-1, where k is the correlation length (number of sites on which acts the biggest local superoperator).
    Local superoperators acting on more than 4 neighbouring sites are not currently supported.
    Parameters:
      N: integer
        Number of sites in the chain of tensors (usually number of particles).
        For OBC tensor-network length N has to be at least 2k-1 where k is the correlation length (number of sites on which acts the biggest local superoperator).
      d: integer
        Dimension of local Hilbert space (dimension of physical index).
      BC: 'O' or 'P'
        Boundary conditions, 'O' for OBC, 'P' for PBC.
      so_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites a particular local superoperator acts
        List of local superoperators in order of their action on the system.
        Local superoperators acting on more than 4 neighbour sites are not currently supported.
      tol: float, optional
        Factor which after multiplication by the highest singular value gives a cutoff on singular values that are treated as nonzero.
    Returns:
      ch: list of length N of ndarrays of shape (Dl_ch,Dr_ch,d**2,d**2) for OBC (Dl_ch, Dr_ch can vary between sites) or ndarray of shape (D_ch,D_ch,d**2,d**2,N) for PBC
        Quantum channel as a superoperator in the MPO representation.
    """
    # No local superoperators -> identity channel with bond dimension 1.
    if so_list == []:
        if BC == 'O':
            ch = np.eye(d**2,dtype=complex)
            ch = ch[np.newaxis,np.newaxis,:,:]
            ch = [ch]*N
        elif BC == 'P':
            ch = np.eye(d**2,dtype=complex)
            ch = ch[np.newaxis,np.newaxis,:,:,np.newaxis]
            ch = np.tile(ch,(1,1,1,1,N))
        return ch
    if BC == 'O':
        ch = [0]*N
        kmax = max([int(math.log(np.shape(so_list[i])[0],d**2)) for i in range(len(so_list))])
        if N < 2*kmax-1:
            warnings.warn('For OBC tensor-network length N have to be at least 2k-1 where k is correlation length (number of sites on which acts the biggest local superoperator).')
        for x in range(N):
            # Bulk sites far enough from both edges share the same site tensor.
            if x >= kmax and N-x >= kmax:
                ch[x] = ch[x-1]
                continue
            for i in range(len(so_list)):
                so = so_list[i]
                k = int(math.log(np.shape(so)[0],d**2))
                if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
                    # Diagonal superoperator: only its diagonal is needed, and the
                    # resulting MPO site tensors are diagonal in the physical indices.
                    so = np.diag(so)
                    if k == 1:
                        bdchil = 1
                        bdchir = 1
                        chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                        for nx in range(d**2):
                            chi[:,:,nx,nx] = so[nx]
                    elif k == 2:
                        so = np.reshape(so,(d**2,d**2),order='F')
                        u,s,vh = np.linalg.svd(so)
                        s = s[s > s[0]*tol]
                        bdchi = np.shape(s)[0]
                        u = u[:,:bdchi]
                        vh = vh[:bdchi,:]
                        us = u @ np.diag(np.sqrt(s))
                        sv = np.diag(np.sqrt(s)) @ vh
                        if x == 0:
                            bdchil = 1
                            bdchir = bdchi
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [us[nx,:]]
                                legs = [[-1]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x > 0 and x < N-1:
                            bdchil = bdchi
                            bdchir = bdchi
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv[:,nx],us[nx,:]]
                                legs = [[-1],[-2]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == N-1:
                            bdchil = bdchi
                            bdchir = 1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv[:,nx]]
                                legs = [[-1]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                    elif k == 3:
                        so = np.reshape(so,(d**2,d**4),order='F')
                        u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
                        s1 = s1[s1 > s1[0]*tol]
                        bdchi1 = np.shape(s1)[0]
                        u1 = u1[:,:bdchi1]
                        vh1 = vh1[:bdchi1,:]
                        us1 = u1 @ np.diag(np.sqrt(s1))
                        sv1 = np.diag(np.sqrt(s1)) @ vh1
                        sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
                        u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
                        s2 = s2[s2 > s2[0]*tol]
                        bdchi2 = np.shape(s2)[0]
                        u2 = u2[:,:bdchi2]
                        vh2 = vh2[:bdchi2,:]
                        us2 = u2 @ np.diag(np.sqrt(s2))
                        us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
                        sv2 = np.diag(np.sqrt(s2)) @ vh2
                        if x == 0:
                            bdchil = 1
                            bdchir = bdchi1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [us1[nx,:]]
                                legs = [[-1]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == 1:
                            bdchil = bdchi1
                            bdchir = bdchi2*bdchi1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [us2[:,nx,:],us1[nx,:]]
                                legs = [[-1,-2],[-3]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x > 1 and x < N-2:
                            bdchil = bdchi2*bdchi1
                            bdchir = bdchi2*bdchi1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
                                legs = [[-1],[-2,-3],[-4]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == N-2:
                            bdchil = bdchi2*bdchi1
                            bdchir = bdchi2
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv2[:,nx],us2[:,nx,:]]
                                legs = [[-1],[-2,-3]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == N-1:
                            bdchil = bdchi2
                            bdchir = 1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv2[:,nx]]
                                legs = [[-1]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                    elif k == 4:
                        so = np.reshape(so,(d**2,d**6),order='F')
                        u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
                        s1 = s1[s1 > s1[0]*tol]
                        bdchi1 = np.shape(s1)[0]
                        u1 = u1[:,:bdchi1]
                        vh1 = vh1[:bdchi1,:]
                        us1 = u1 @ np.diag(np.sqrt(s1))
                        sv1 = np.diag(np.sqrt(s1)) @ vh1
                        sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
                        u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
                        s2 = s2[s2 > s2[0]*tol]
                        bdchi2 = np.shape(s2)[0]
                        u2 = u2[:,:bdchi2]
                        vh2 = vh2[:bdchi2,:]
                        us2 = u2 @ np.diag(np.sqrt(s2))
                        us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
                        sv2 = np.diag(np.sqrt(s2)) @ vh2
                        sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
                        u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
                        s3 = s3[s3 > s3[0]*tol]
                        bdchi3 = np.shape(s3)[0]
                        u3 = u3[:,:bdchi3]
                        vh3 = vh3[:bdchi3,:]
                        us3 = u3 @ np.diag(np.sqrt(s3))
                        us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
                        sv3 = np.diag(np.sqrt(s3)) @ vh3
                        if x == 0:
                            bdchil = 1
                            bdchir = bdchi1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [us1[nx,:]]
                                legs = [[-1]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == 1:
                            bdchil = bdchi1
                            bdchir = bdchi2*bdchi1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [us2[:,nx,:],us1[nx,:]]
                                legs = [[-1,-2],[-3]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == 2:
                            bdchil = bdchi2*bdchi1
                            bdchir = bdchi3*bdchi2*bdchi1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
                                legs = [[-1,-3],[-2,-4],[-5]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x > 2 and x < N-3:
                            bdchil = bdchi3*bdchi2*bdchi1
                            bdchir = bdchi3*bdchi2*bdchi1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
                                legs = [[-1],[-2,-4],[-3,-5],[-6]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == N-3:
                            bdchil = bdchi3*bdchi2*bdchi1
                            bdchir = bdchi3*bdchi2
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:]]
                                legs = [[-1],[-2,-4],[-3,-5]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == N-2:
                            bdchil = bdchi3*bdchi2
                            bdchir = bdchi3
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv3[:,nx],us3[:,nx,:]]
                                legs = [[-1],[-2,-3]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                        elif x == N-1:
                            bdchil = bdchi3
                            bdchir = 1
                            chi = np.zeros((bdchil,bdchir,d**2,d**2),dtype=complex)
                            for nx in range(d**2):
                                tensors = [sv3[:,nx]]
                                legs = [[-1]]
                                chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchil,bdchir),order='F')
                    else:
                        warnings.warn('Local superoperators acting on more then 4 neighbour sites are not currently supported.')
                else:
                    # General (non-diagonal) superoperator: split it into site tensors
                    # via successive SVDs with singular values below s[0]*tol dropped.
                    if k == 1:
                        bdchil = 1
                        bdchir = 1
                        chi = so[np.newaxis,np.newaxis,:,:]
                    elif k == 2:
                        u,s,vh = np.linalg.svd(so)
                        s = s[s > s[0]*tol]
                        bdchi = np.shape(s)[0]
                        u = u[:,:bdchi]
                        vh = vh[:bdchi,:]
                        us = u @ np.diag(np.sqrt(s))
                        sv = np.diag(np.sqrt(s)) @ vh
                        us = np.reshape(us,(d**2,d**2,bdchi),order='F')
                        sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
                        if x == 0:
                            tensors = [us]
                            legs = [[-2,-3,-1]]
                            chi = ncon(tensors,legs)
                            bdchil = 1
                            bdchir = bdchi
                        elif x > 0 and x < N-1:
                            tensors = [sv,us]
                            legs = [[-1,-3,1],[1,-4,-2]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi
                            bdchir = bdchi
                        elif x == N-1:
                            tensors = [sv]
                            legs = [[-1,-2,-3]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi
                            bdchir = 1
                        chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
                    elif k == 3:
                        so = np.reshape(so,(d**4,d**8),order='F')
                        u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
                        s1 = s1[s1 > s1[0]*tol]
                        bdchi1 = np.shape(s1)[0]
                        u1 = u1[:,:bdchi1]
                        vh1 = vh1[:bdchi1,:]
                        us1 = u1 @ np.diag(np.sqrt(s1))
                        sv1 = np.diag(np.sqrt(s1)) @ vh1
                        us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
                        sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
                        u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
                        s2 = s2[s2 > s2[0]*tol]
                        bdchi2 = np.shape(s2)[0]
                        u2 = u2[:,:bdchi2]
                        vh2 = vh2[:bdchi2,:]
                        us2 = u2 @ np.diag(np.sqrt(s2))
                        us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
                        sv2 = np.diag(np.sqrt(s2)) @ vh2
                        sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
                        if x == 0:
                            tensors = [us1]
                            legs = [[-2,-3,-1]]
                            chi = ncon(tensors,legs)
                            bdchil = 1
                            bdchir = bdchi1
                        elif x == 1:
                            tensors = [us2,us1]
                            legs = [[-1,-5,1,-2],[1,-6,-3]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi1
                            bdchir = bdchi2*bdchi1
                        elif x > 1 and x < N-2:
                            tensors = [sv2,us2,us1]
                            legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi2*bdchi1
                            bdchir = bdchi2*bdchi1
                        elif x == N-2:
                            tensors = [sv2,us2]
                            legs = [[-1,-4,1],[-2,1,-5,-3]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi2*bdchi1
                            bdchir = bdchi2
                        elif x == N-1:
                            tensors = [sv2]
                            legs = [[-1,-2,-3]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi2
                            bdchir = 1
                        chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
                    elif k == 4:
                        so = np.reshape(so,(d**4,d**12),order='F')
                        u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
                        s1 = s1[s1 > s1[0]*tol]
                        bdchi1 = np.shape(s1)[0]
                        u1 = u1[:,:bdchi1]
                        vh1 = vh1[:bdchi1,:]
                        us1 = u1 @ np.diag(np.sqrt(s1))
                        sv1 = np.diag(np.sqrt(s1)) @ vh1
                        us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
                        sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
                        u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
                        s2 = s2[s2 > s2[0]*tol]
                        bdchi2 = np.shape(s2)[0]
                        u2 = u2[:,:bdchi2]
                        vh2 = vh2[:bdchi2,:]
                        us2 = u2 @ np.diag(np.sqrt(s2))
                        us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
                        sv2 = np.diag(np.sqrt(s2)) @ vh2
                        sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
                        u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
                        s3 = s3[s3 > s3[0]*tol]
                        bdchi3 = np.shape(s3)[0]
                        u3 = u3[:,:bdchi3]
                        vh3 = vh3[:bdchi3,:]
                        us3 = u3 @ np.diag(np.sqrt(s3))
                        us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
                        sv3 = np.diag(np.sqrt(s3)) @ vh3
                        sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
                        if x == 0:
                            tensors = [us1]
                            legs = [[-2,-3,-1]]
                            chi = ncon(tensors,legs)
                            bdchil = 1
                            bdchir = bdchi1
                        elif x == 1:
                            tensors = [us2,us1]
                            legs = [[-1,-4,1,-2],[1,-5,-3]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi1
                            bdchir = bdchi2*bdchi1
                        elif x == 2:
                            tensors = [us3,us2,us1]
                            legs = [[-1,-6,1,-3],[-2,1,2,-4],[2,-7,-5]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi2*bdchi1
                            bdchir = bdchi3*bdchi2*bdchi1
                        elif x > 2 and x < N-3:
                            tensors = [sv3,us3,us2,us1]
                            legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi3*bdchi2*bdchi1
                            bdchir = bdchi3*bdchi2*bdchi1
                        elif x == N-3:
                            tensors = [sv3,us3,us2]
                            legs = [[-1,-6,1],[-2,1,2,-4],[-3,2,-7,-5]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi3*bdchi2*bdchi1
                            bdchir = bdchi3*bdchi2
                        elif x == N-2:
                            tensors = [sv3,us3]
                            legs = [[-1,-4,1],[-2,1,-5,-3]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi3*bdchi2
                            bdchir = bdchi3
                        elif x == N-1:
                            tensors = [sv3]
                            legs = [[-1,-2,-3]]
                            chi = ncon(tensors,legs)
                            bdchil = bdchi3
                            bdchir = 1
                        # BUGFIX: this reshape previously used (bdchi,bdchi,...), but
                        # bdchi is never assigned in this k == 4 branch and the
                        # left/right bond dimensions differ near the chain edges.
                        # Use (bdchil,bdchir,...) as in the k == 2 and k == 3 branches.
                        chi = np.reshape(chi,(bdchil,bdchir,d**2,d**2),order='F')
                    else:
                        warnings.warn('Local superoperators acting on more then 4 neighbour sites are not currently supported.')
                # Stack the current superoperator's MPO on top of the ones applied so far.
                if i == 0:
                    bdchl = bdchil
                    bdchr = bdchir
                    ch[x] = chi
                else:
                    bdchl = bdchil*bdchl
                    bdchr = bdchir*bdchr
                    tensors = [chi,ch[x]]
                    legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
                    ch[x] = ncon(tensors,legs)
                    ch[x] = np.reshape(ch[x],(bdchl,bdchr,d**2,d**2),order='F')
    elif BC == 'P':
        for i in range(len(so_list)):
            so = so_list[i]
            k = int(math.log(np.shape(so)[0],d**2))
            if np.linalg.norm(so-np.diag(np.diag(so))) < 10**-10:
                # Diagonal superoperator: translationally invariant site tensor,
                # diagonal in the physical indices.
                so = np.diag(so)
                if k == 1:
                    bdchi = 1
                    chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
                    for nx in range(d**2):
                        chi[:,:,nx,nx] = so[nx]
                elif k == 2:
                    so = np.reshape(so,(d**2,d**2),order='F')
                    u,s,vh = np.linalg.svd(so)
                    s = s[s > s[0]*tol]
                    bdchi = np.shape(s)[0]
                    u = u[:,:bdchi]
                    vh = vh[:bdchi,:]
                    us = u @ np.diag(np.sqrt(s))
                    sv = np.diag(np.sqrt(s)) @ vh
                    chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
                    for nx in range(d**2):
                        chi[:,:,nx,nx] = np.outer(sv[:,nx],us[nx,:])
                elif k == 3:
                    so = np.reshape(so,(d**2,d**4),order='F')
                    u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
                    s1 = s1[s1 > s1[0]*tol]
                    bdchi1 = np.shape(s1)[0]
                    u1 = u1[:,:bdchi1]
                    vh1 = vh1[:bdchi1,:]
                    us1 = u1 @ np.diag(np.sqrt(s1))
                    sv1 = np.diag(np.sqrt(s1)) @ vh1
                    sv1 = np.reshape(sv1,(bdchi1*d**2,d**2),order='F')
                    u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
                    s2 = s2[s2 > s2[0]*tol]
                    bdchi2 = np.shape(s2)[0]
                    u2 = u2[:,:bdchi2]
                    vh2 = vh2[:bdchi2,:]
                    us2 = u2 @ np.diag(np.sqrt(s2))
                    us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
                    sv2 = np.diag(np.sqrt(s2)) @ vh2
                    bdchi = bdchi2*bdchi1
                    chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
                    for nx in range(d**2):
                        tensors = [sv2[:,nx],us2[:,nx,:],us1[nx,:]]
                        legs = [[-1],[-2,-3],[-4]]
                        chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
                elif k == 4:
                    so = np.reshape(so,(d**2,d**6),order='F')
                    u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
                    s1 = s1[s1 > s1[0]*tol]
                    bdchi1 = np.shape(s1)[0]
                    u1 = u1[:,:bdchi1]
                    vh1 = vh1[:bdchi1,:]
                    us1 = u1 @ np.diag(np.sqrt(s1))
                    sv1 = np.diag(np.sqrt(s1)) @ vh1
                    sv1 = np.reshape(sv1,(bdchi1*d**2,d**4),order='F')
                    u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
                    s2 = s2[s2 > s2[0]*tol]
                    bdchi2 = np.shape(s2)[0]
                    u2 = u2[:,:bdchi2]
                    vh2 = vh2[:bdchi2,:]
                    us2 = u2 @ np.diag(np.sqrt(s2))
                    us2 = np.reshape(us2,(bdchi1,d**2,bdchi2),order='F')
                    sv2 = np.diag(np.sqrt(s2)) @ vh2
                    sv2 = np.reshape(sv2,(bdchi2*d**2,d**2),order='F')
                    u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
                    s3 = s3[s3 > s3[0]*tol]
                    bdchi3 = np.shape(s3)[0]
                    u3 = u3[:,:bdchi3]
                    vh3 = vh3[:bdchi3,:]
                    us3 = u3 @ np.diag(np.sqrt(s3))
                    us3 = np.reshape(us3,(bdchi2,d**2,bdchi3),order='F')
                    sv3 = np.diag(np.sqrt(s3)) @ vh3
                    bdchi = bdchi3*bdchi2*bdchi1
                    chi = np.zeros((bdchi,bdchi,d**2,d**2),dtype=complex)
                    for nx in range(d**2):
                        tensors = [sv3[:,nx],us3[:,nx,:],us2[:,nx,:],us1[nx,:]]
                        legs = [[-1],[-2,-4],[-3,-5],[-6]]
                        chi[:,:,nx,nx] = np.reshape(ncon(tensors,legs),(bdchi,bdchi),order='F')
                else:
                    warnings.warn('Local superoperators acting on more then 4 neighbour sites are not currently supported.')
            else:
                # General (non-diagonal) superoperator split via successive SVDs.
                if k == 1:
                    bdchi = 1
                    chi = so[np.newaxis,np.newaxis,:,:]
                elif k == 2:
                    u,s,vh = np.linalg.svd(so)
                    s = s[s > s[0]*tol]
                    bdchi = np.shape(s)[0]
                    u = u[:,:bdchi]
                    vh = vh[:bdchi,:]
                    us = u @ np.diag(np.sqrt(s))
                    sv = np.diag(np.sqrt(s)) @ vh
                    us = np.reshape(us,(d**2,d**2,bdchi),order='F')
                    sv = np.reshape(sv,(bdchi,d**2,d**2),order='F')
                    tensors = [sv,us]
                    legs = [[-1,-3,1],[1,-4,-2]]
                    chi = ncon(tensors,legs)
                elif k == 3:
                    so = np.reshape(so,(d**4,d**8),order='F')
                    u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
                    s1 = s1[s1 > s1[0]*tol]
                    bdchi1 = np.shape(s1)[0]
                    u1 = u1[:,:bdchi1]
                    vh1 = vh1[:bdchi1,:]
                    us1 = u1 @ np.diag(np.sqrt(s1))
                    sv1 = np.diag(np.sqrt(s1)) @ vh1
                    us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
                    sv1 = np.reshape(sv1,(bdchi1*d**4,d**4),order='F')
                    u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
                    s2 = s2[s2 > s2[0]*tol]
                    bdchi2 = np.shape(s2)[0]
                    u2 = u2[:,:bdchi2]
                    vh2 = vh2[:bdchi2,:]
                    us2 = u2 @ np.diag(np.sqrt(s2))
                    us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
                    sv2 = np.diag(np.sqrt(s2)) @ vh2
                    sv2 = np.reshape(sv2,(bdchi2,d**2,d**2),order='F')
                    tensors = [sv2,us2,us1]
                    legs = [[-1,-5,1],[-2,1,2,-3],[2,-6,-4]]
                    chi = ncon(tensors,legs)
                    bdchi = bdchi2*bdchi1
                    chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
                elif k == 4:
                    so = np.reshape(so,(d**4,d**12),order='F')
                    u1,s1,vh1 = np.linalg.svd(so,full_matrices=False)
                    s1 = s1[s1 > s1[0]*tol]
                    bdchi1 = np.shape(s1)[0]
                    u1 = u1[:,:bdchi1]
                    vh1 = vh1[:bdchi1,:]
                    us1 = u1 @ np.diag(np.sqrt(s1))
                    sv1 = np.diag(np.sqrt(s1)) @ vh1
                    us1 = np.reshape(us1,(d**2,d**2,bdchi1),order='F')
                    sv1 = np.reshape(sv1,(bdchi1*d**4,d**8),order='F')
                    u2,s2,vh2 = np.linalg.svd(sv1,full_matrices=False)
                    s2 = s2[s2 > s2[0]*tol]
                    bdchi2 = np.shape(s2)[0]
                    u2 = u2[:,:bdchi2]
                    vh2 = vh2[:bdchi2,:]
                    us2 = u2 @ np.diag(np.sqrt(s2))
                    us2 = np.reshape(us2,(bdchi1,d**2,d**2,bdchi2),order='F')
                    sv2 = np.diag(np.sqrt(s2)) @ vh2
                    sv2 = np.reshape(sv2,(bdchi2*d**4,d**4),order='F')
                    u3,s3,vh3 = np.linalg.svd(sv2,full_matrices=False)
                    s3 = s3[s3 > s3[0]*tol]
                    bdchi3 = np.shape(s3)[0]
                    u3 = u3[:,:bdchi3]
                    vh3 = vh3[:bdchi3,:]
                    us3 = u3 @ np.diag(np.sqrt(s3))
                    us3 = np.reshape(us3,(bdchi2,d**2,d**2,bdchi3),order='F')
                    sv3 = np.diag(np.sqrt(s3)) @ vh3
                    sv3 = np.reshape(sv3,(bdchi3,d**2,d**2),order='F')
                    tensors = [sv3,us3,us2,us1]
                    legs = [[-1,-7,1],[-2,1,2,-4],[-3,2,3,-5],[3,-8,-6]]
                    chi = ncon(tensors,legs)
                    bdchi = bdchi3*bdchi2*bdchi1
                    chi = np.reshape(chi,(bdchi,bdchi,d**2,d**2),order='F')
                else:
                    warnings.warn('Local superoperators acting on more then 4 neighbour sites are not currently supported.')
            # Stack the current superoperator's MPO on top of the ones applied so far.
            if i == 0:
                bdch = bdchi
                ch = chi
            else:
                bdch = bdchi*bdch
                tensors = [chi,ch]
                legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
                ch = ncon(tensors,legs)
                ch = np.reshape(ch,(bdch,bdch,d**2,d**2),order='F')
        ch = ch[:,:,:,:,np.newaxis]
        ch = np.tile(ch,(1,1,1,1,N))
    return ch
def fin_create_channel_derivative(N, d, BC, so_before_list, h, so_after_list):
    """
    Creates a MPO for the derivative (over estimated parameter) of the superoperator describing the quantum channel. Function for finite size systems.
    Function for translationally invariant channels with unitary parameter encoding generated by h.
    Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
    Parameters:
      N: integer
        Number of sites in the chain of tensors (usually number of particles).
      d: integer
        Dimension of local Hilbert space (dimension of physical index).
      BC: 'O' or 'P'
        Boundary conditions, 'O' for OBC, 'P' for PBC.
      so_before_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
        List of local superoperators (in order) which act before unitary parameter encoding.
      h: ndarray of a shape (d,d)
        Generator of unitary parameter encoding.
        Generator h has to be diagonal in the computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.
      so_after_list: list of ndarrays of a shape (d**(2*k),d**(2*k)) where k describes on how many sites particular local superoperator acts
        List of local superoperators (in order) which act after unitary parameter encoding.
    Returns:
      chd: list of length N of ndarrays of a shape (Dl_chd,Dr_chd,d**2,d**2) for OBC (Dl_chd, Dr_chd can vary between sites) or ndarray of a shape (D_chd,D_chd,d**2,d**2,N) for PBC
        Derivative of superoperator describing quantum channel in MPO representation.
    """
    if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h have to be diagonal in computational basis, or in other words it is assumed that local superoperators are expressed in the eigenbasis of h.')
    # Channel acting before the encoding. fin_create_channel already returns the
    # bond-dimension-1 identity MPO for an empty list, so no special case is needed.
    ch1 = fin_create_channel(N,d,BC,so_before_list)
    # Derivative of the unitarily encoded channel: commutator with 1j*sum{h}.
    ch1d = fin_commutator(N,d,BC,ch1,h,1j)
    if len(so_after_list) == 0:
        chd = ch1d
    else:
        # Compose the derivative with the channel acting after the encoding,
        # site by site (bond dimensions multiply).
        ch2 = fin_create_channel(N,d,BC,so_after_list)
        if BC == 'O':
            chd = [0]*N
            for x in range(N):
                bdch1dl = np.shape(ch1d[x])[0]
                bdch1dr = np.shape(ch1d[x])[1]
                bdch2l = np.shape(ch2[x])[0]
                bdch2r = np.shape(ch2[x])[1]
                tensors = [ch2[x],ch1d[x]]
                legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
                chd[x] = np.reshape(ncon(tensors,legs),(bdch1dl*bdch2l,bdch1dr*bdch2r,d**2,d**2),order='F')
        elif BC == 'P':
            bdch1d = np.shape(ch1d)[0]
            bdch2 = np.shape(ch2)[0]
            chd = np.zeros((bdch1d*bdch2,bdch1d*bdch2,d**2,d**2,N),dtype=complex)
            for x in range(N):
                tensors = [ch2[:,:,:,:,x],ch1d[:,:,:,:,x]]
                legs = [[-1,-3,-5,1],[-2,-4,1,-6]]
                chd[:,:,:,:,x] = np.reshape(ncon(tensors,legs),(bdch1d*bdch2,bdch1d*bdch2,d**2,d**2),order='F')
    return chd
def fin_commutator(N, d, BC, a, h, c):
    """
    Calculate MPO for commutator b = [a, c*sum{h}] of MPO a with sum of local generators h and with arbitrary multiplicative scalar factor c.
    Generator h has to be diagonal in the computational basis, or in other words it is assumed that a is expressed in the eigenbasis of h.
    Parameters:
      N: integer
        Number of sites in the chain of tensors (usually number of particles).
      d: integer
        Dimension of local Hilbert space (dimension of physical index).
      BC: 'O' or 'P'
        Boundary conditions, 'O' for OBC, 'P' for PBC.
      a: list of length N of ndarrays of a shape (Dl_a,Dr_a,d,d) for OBC (Dl_a, Dr_a can vary between sites) or ndarray of a shape (D_a,D_a,d,d,N) for PBC
        MPO; it may be an operator (physical dimension d) or a superoperator (physical dimension d**2).
      h: ndarray of a shape (d,d)
        Generator of unitary parameter encoding.
        Generator h has to be diagonal in the computational basis, or in other words it is assumed that a is expressed in the eigenbasis of h.
      c: complex
        Scalar factor which multiplies sum of local generators.
    Returns:
      b: list of length N of ndarrays of a shape (Dl_b,Dr_b,d,d) for OBC (Dl_b, Dr_b can vary between sites) or ndarray of a shape (D_b,D_b,d,d,N) for PBC
        Commutator [a, c*sum{h}] in MPO representation.
    """
    if np.linalg.norm(h-np.diag(np.diag(h))) > 10**-10:
        warnings.warn('Generator h have to be diagonal in computational basis, or in other words it is assumed that a is expressed in the eigenbasis of h.')
    if BC == 'O':
        b = [0]*N
        for x in range(N):
            site = a[x]
            da = np.shape(site)[2]
            bda1 = np.shape(site)[0]
            bda2 = np.shape(site)[1]
            # Bond dimensions of the commutator's "counting" MPO at this site.
            if x == 0:
                bdbh1, bdbh2 = 1, 2
            elif x < N-1:
                bdbh1, bdbh2 = 2, 2
            else:
                bdbh1, bdbh2 = 2, 1
            bhx = np.zeros((bdbh1,bdbh2,d,d),dtype=complex)
            for i in range(d):
                for j in range(d):
                    diff = c*(h[j,j]-h[i,i])
                    if x == 0:
                        bhx[:,:,i,j] = np.array([[diff,1]])
                    elif x < N-1:
                        bhx[:,:,i,j] = np.array([[1,0],[diff,1]])
                    else:
                        bhx[:,:,i,j] = np.array([[1],[diff]])
            if da == d:
                # a is an operator.
                b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d,d),dtype=complex)
                for i in range(d):
                    for j in range(d):
                        b[x][:,:,i,j] = np.kron(bhx[:,:,i,j],site[:,:,i,j])
            elif da == d**2:
                # a is a superoperator (vectorized channel); flatten the physical
                # pair (i,j) of bhx into a single d**2 index (Fortran order).
                bhx = np.reshape(bhx,(bdbh1,bdbh2,d**2),order='F')
                b[x] = np.zeros((bdbh1*bda1,bdbh2*bda2,d**2,d**2),dtype=complex)
                for i in range(d**2):
                    for j in range(d**2):
                        b[x][:,:,i,j] = np.kron(bhx[:,:,i],site[:,:,i,j])
    elif BC == 'P':
        da = np.shape(a)[2]
        bda = np.shape(a)[0]
        bdbh = 1 if N == 1 else 2
        bh = np.zeros((bdbh,bdbh,d,d,N),dtype=complex)
        for i in range(d):
            for j in range(d):
                diff = c*(h[j,j]-h[i,i])
                if N == 1:
                    bh[:,:,i,j,0] = diff
                else:
                    bh[:,:,i,j,0] = np.array([[diff,1],[0,0]])
                    for x in range(1,N-1):
                        bh[:,:,i,j,x] = np.array([[1,0],[diff,1]])
                    bh[:,:,i,j,N-1] = np.array([[1,0],[diff,0]])
        if da == d:
            # a is an operator.
            b = np.zeros((bdbh*bda,bdbh*bda,d,d,N),dtype=complex)
            for i in range(d):
                for j in range(d):
                    for x in range(N):
                        b[:,:,i,j,x] = np.kron(bh[:,:,i,j,x],a[:,:,i,j,x])
        elif da == d**2:
            # a is a superoperator (vectorized channel).
            bh = np.reshape(bh,(bdbh,bdbh,d**2,N),order='F')
            b = np.zeros((bdbh*bda,bdbh*bda,d**2,d**2,N),dtype=complex)
            for i in range(d**2):
                for j in range(d**2):
                    for x in range(N):
                        b[:,:,i,j,x] = np.kron(bh[:,:,i,x],a[:,:,i,j,x])
    return b
def fin_enlarge_bdl(cold,factor):
    """
    Enlarge bond dimension of SLD MPO by one. Function for finite size systems.
    New entries are random, with magnitude set by the mean magnitude of the old
    entries times factor; the old tensor is embedded in the enlarged one.
    Parameters:
      cold: SLD MPO, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
      factor: factor which determine on average relation between old and newly added values of SLD MPO
    Returns:
      c: SLD MPO with bd += 1
    """
    rng = np.random.default_rng()
    if type(cold) is list:
        n = len(cold)
        if n == 1:
            warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
            # Bond dimension of a length-one OBC MPO cannot grow; return input unchanged.
            # (Previously c was left unbound here, raising NameError at return.)
            c = cold
        else:
            c = [0]*n
            x = 0
            d = np.shape(cold[x])[2]
            bdl1 = 1
            bdl2 = np.shape(cold[x])[1]+1
            c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
            for nx in range(d):
                for nxp in range(d):
                    meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl2-1)
                    meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl2-1)
                    c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
            c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
            # BUGFIX: was c[x][0:bdl1-1,...] with bdl1 == 1, i.e. a zero-size slice;
            # numpy broadcast the size-1 axis into size 0 and silently dropped cold[x].
            # The left bond of the first site keeps its full (size-1) extent.
            c[x][0:bdl1,0:bdl2-1,:,:] = cold[x]
            for x in range(1,n-1):
                d = np.shape(cold[x])[2]
                bdl1 = np.shape(cold[x])[0]+1
                bdl2 = np.shape(cold[x])[1]+1
                c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
                for nx in range(d):
                    for nxp in range(d):
                        meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
                        meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/((bdl1-1)*(bdl2-1))
                        c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
                c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
                c[x][0:bdl1-1,0:bdl2-1,:,:] = cold[x]
            x = n-1
            d = np.shape(cold[x])[2]
            bdl1 = np.shape(cold[x])[0]+1
            bdl2 = 1
            c[x] = np.zeros((bdl1,bdl2,d,d),dtype=complex)
            for nx in range(d):
                for nxp in range(d):
                    meanrecold = np.sum(np.abs(np.real(cold[x][:,:,nx,nxp])))/(bdl1-1)
                    meanimcold = np.sum(np.abs(np.imag(cold[x][:,:,nx,nxp])))/(bdl1-1)
                    c[x][:,:,nx,nxp] = (meanrecold*rng.random((bdl1,bdl2))+1j*meanimcold*rng.random((bdl1,bdl2)))*factor
            c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
            # BUGFIX: same zero-size-slice problem on the right bond of the last site.
            c[x][0:bdl1-1,0:bdl2,:,:] = cold[x]
    elif type(cold) is np.ndarray:
        n = np.shape(cold)[4]
        d = np.shape(cold)[2]
        bdl = np.shape(cold)[0]+1
        c = np.zeros((bdl,bdl,d,d,n),dtype=complex)
        for nx in range(d):
            for nxp in range(d):
                for x in range(n):
                    meanrecold = np.sum(np.abs(np.real(cold[:,:,nx,nxp,x])))/(bdl-1)**2
                    meanimcold = np.sum(np.abs(np.imag(cold[:,:,nx,nxp,x])))/(bdl-1)**2
                    c[:,:,nx,nxp,x] = (meanrecold*rng.random((bdl,bdl))+1j*meanimcold*rng.random((bdl,bdl)))*factor
        c = (c + np.conj(np.moveaxis(c,2,3)))/2
        c[0:bdl-1,0:bdl-1,:,:,:] = cold
    return c
def fin_enlarge_bdpsi(a0old,factor):
    """
    Enlarge bond dimension of wave function MPS by one and renormalize it. Function for finite size systems.
    New entries are random, with magnitude set by the mean magnitude of the old
    entries times factor; the old tensor is embedded in the enlarged one.
    Parameters:
      a0old: wave function MPS, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
      factor: factor which determine on average relation between old and newly added values of wave function MPS
        (the docstring previously called this parameter "ratio"; the actual name is factor)
    Returns:
      a0: normalized wave function MPS with bd += 1
    """
    rng = np.random.default_rng()
    if type(a0old) is list:
        n = len(a0old)
        if n == 1:
            warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
            # Bond dimension of a length-one OBC MPS cannot grow; return input unchanged.
            # (Previously a0 was left unbound here, raising NameError at return.)
            a0 = a0old
        else:
            a0 = [0]*n
            x = 0
            d = np.shape(a0old[x])[2]
            bdpsi1 = 1
            bdpsi2 = np.shape(a0old[x])[1]+1
            a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
            for nx in range(d):
                meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi2-1)
                meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi2-1)
                a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
            # BUGFIX: was a0[x][0:bdpsi1-1,...] with bdpsi1 == 1, i.e. a zero-size
            # slice; numpy broadcast the size-1 axis into size 0 and silently dropped
            # a0old[x]. The left bond of the first site keeps its full (size-1) extent.
            a0[x][0:bdpsi1,0:bdpsi2-1,:] = a0old[x]
            for x in range(1,n-1):
                d = np.shape(a0old[x])[2]
                bdpsi1 = np.shape(a0old[x])[0]+1
                bdpsi2 = np.shape(a0old[x])[1]+1
                a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
                for nx in range(d):
                    meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
                    meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/((bdpsi1-1)*(bdpsi2-1))
                    a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
                a0[x][0:bdpsi1-1,0:bdpsi2-1,:] = a0old[x]
            x = n-1
            d = np.shape(a0old[x])[2]
            bdpsi1 = np.shape(a0old[x])[0]+1
            bdpsi2 = 1
            a0[x] = np.zeros((bdpsi1,bdpsi2,d),dtype=complex)
            for nx in range(d):
                meanrea0old = np.sum(np.abs(np.real(a0old[x][:,:,nx])))/(bdpsi1-1)
                meanima0old = np.sum(np.abs(np.imag(a0old[x][:,:,nx])))/(bdpsi1-1)
                a0[x][:,:,nx] = (meanrea0old*rng.random((bdpsi1,bdpsi2))+1j*meanima0old*rng.random((bdpsi1,bdpsi2)))*factor
            # BUGFIX: same zero-size-slice problem on the right bond of the last site.
            a0[x][0:bdpsi1-1,0:bdpsi2,:] = a0old[x]
            # Right-to-left sweep normalizing each tensor, then fix the global norm.
            tensors = [np.conj(a0[n-1]),a0[n-1]]
            legs = [[-1,-3,1],[-2,-4,1]]
            r1 = ncon(tensors,legs)
            a0[n-1] = a0[n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
            tensors = [np.conj(a0[n-1]),a0[n-1]]
            legs = [[-1,-3,1],[-2,-4,1]]
            r2 = ncon(tensors,legs)
            for x in range(n-2,0,-1):
                tensors = [np.conj(a0[x]),a0[x],r2]
                legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
                r1 = ncon(tensors,legs)
                a0[x] = a0[x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
                tensors = [np.conj(a0[x]),a0[x],r2]
                legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
                r2 = ncon(tensors,legs)
            tensors = [np.conj(a0[0]),a0[0],r2]
            legs = [[4,2,1],[5,3,1],[2,3,4,5]]
            r1 = ncon(tensors,legs)
            a0[0] = a0[0]/np.sqrt(np.abs(r1))
    elif type(a0old) is np.ndarray:
        n = np.shape(a0old)[3]
        d = np.shape(a0old)[2]
        bdpsi = np.shape(a0old)[0]+1
        a0 = np.zeros((bdpsi,bdpsi,d,n),dtype=complex)
        for nx in range(d):
            for x in range(n):
                meanrea0old = np.sum(np.abs(np.real(a0old[:,:,nx,x])))/(bdpsi-1)**2
                meanima0old = np.sum(np.abs(np.imag(a0old[:,:,nx,x])))/(bdpsi-1)**2
                a0[:,:,nx,x] = (meanrea0old*rng.random((bdpsi,bdpsi))+1j*meanima0old*rng.random((bdpsi,bdpsi)))*factor
        a0[0:bdpsi-1,0:bdpsi-1,:,:] = a0old
        if n == 1:
            tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
            legs = [[2,2,1],[3,3,1]]
            r1 = ncon(tensors,legs)
            a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
        else:
            # Right-to-left sweep normalizing each tensor, then fix the global norm.
            tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
            legs = [[-1,-3,1],[-2,-4,1]]
            r1 = ncon(tensors,legs)
            a0[:,:,:,n-1] = a0[:,:,:,n-1]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
            tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
            legs = [[-1,-3,1],[-2,-4,1]]
            r2 = ncon(tensors,legs)
            for x in range(n-2,0,-1):
                tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
                legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
                r1 = ncon(tensors,legs)
                a0[:,:,:,x] = a0[:,:,:,x]/np.sqrt(np.linalg.norm(np.reshape(r1,-1,order='F')))
                tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],r2]
                legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
                r2 = ncon(tensors,legs)
            tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0],r2]
            legs = [[4,2,1],[5,3,1],[2,3,4,5]]
            r1 = ncon(tensors,legs)
            a0[:,:,:,0] = a0[:,:,:,0]/np.sqrt(np.abs(r1))
    return a0
#########################################
# 1.2.1 Problems with exact derivative. #
#########################################
def fin_FoM_FoMD_optbd(n,d,bc,ch,chp,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
    """
    Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also check of convergence in bond dimensions. Function for finite size systems.

    Parameters:
      n: number of sites in TN
      d: dimension of local Hilbert space (dimension of physical index)
      bc: boundary conditions, 'O' for OBC, 'P' for PBC
      ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
      chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
      cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
      a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
      imprecision: expected imprecision of the end results, default value is 10**-2
      bdlmax: maximal value of bd for SLD MPO, default value is 100
      alwaysbdlmax: boolean value, True if maximal value of bd for SLD MPO have to be reached, otherwise False (default value)
      lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
      bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
      alwaysbdpsimax: boolean value, True if maximal value of bd for initial wave function MPS have to be reached, otherwise False (default value)

    Returns:
      result: optimal value of FoM/FoMD
      resultm: matrix describing FoM/FoMD in function of bd of respectively SLD MPO [rows] and initial wave function MPS [columns]
      c: optimal MPO for SLD
      a0: optimal MPS for initial wave function
    """
    # Outermost loop is a retry loop: if at some point no enlargement factor in
    # factorv yields an improvement ("problem"), everything is restarted from a
    # fresh (random, for cini is None) initialization.
    while True:
        # --- build the initial wave-function MPS (bond dimension bdpsi) ---
        if a0ini is None:
            bdpsi = 1
            a0 = np.zeros(d,dtype=complex)
            # Default product-state amplitudes with binomial weights.
            for i in range(d):
                a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
                # a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
            if bc == 'O':
                a0 = a0[np.newaxis,np.newaxis,:]
                a0 = [a0]*n
            elif bc == 'P':
                a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
                a0 = np.tile(a0,(1,1,1,n))
        else:
            a0 = a0ini
            if bc == 'O':
                bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
                a0 = [a0[i].astype(complex) for i in range(n)]
            elif bc == 'P':
                bdpsi = np.shape(a0)[0]
                a0 = a0.astype(complex)
        # --- build the initial SLD MPO (bond dimension bdl) ---
        if cini is None:
            bdl = 1
            rng = np.random.default_rng()
            if bc == 'O':
                c = [0]*n
                # Random start, symmetrized to be Hermitian in the physical indices.
                c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
                c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
                for x in range(1,n-1):
                    c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
                    c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
                c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
                c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
            elif bc == 'P':
                c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
                c = (c + np.conj(np.moveaxis(c,2,3)))/2
        else:
            c = cini
            if bc == 'O':
                bdl = max([np.shape(c[i])[0] for i in range(n)])
                c = [c[i].astype(complex) for i in range(n)]
            elif bc == 'P':
                bdl = np.shape(c)[0]
                c = c.astype(complex)
        # resultm[bdl-1,bdpsi-1] records the optimized FoM/FoMD for each pair of
        # bond dimensions tried; it is cropped to the explored region on return.
        resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
        resultm[bdl-1,bdpsi-1],c,a0 = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
        if bc == 'O' and n == 1:
            # Single-site OBC tensors have bond dimension 1, nothing to enlarge.
            resultm = resultm[0:bdl,0:bdpsi]
            result = resultm[bdl-1,bdpsi-1]
            return result,resultm,c,a0
        # Scaling factors for the random enlargement noise, tried in this order.
        factorv = np.array([0.5,0.25,0.1,1,0.01])
        problem = False
        while True:
            # Inner loop: grow bdpsi (MPS bond dimension) at fixed bdl until the
            # relative gain drops below `imprecision` (or bdpsimax is reached).
            while True:
                if bdpsi == bdpsimax:
                    break
                else:
                    a0old = a0
                    bdpsi += 1
                    i = 0
                    while True:
                        a0 = fin_enlarge_bdpsi(a0,factorv[i])
                        resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
                        if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
                            break
                        i += 1
                        if i == np.size(factorv):
                            # No enlargement factor improved the result.
                            problem = True
                            break
                    if problem:
                        break
                    if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
                        # Gain too small: revert to the previous bdpsi but keep the
                        # enlarged candidates in case they win after bdl grows.
                        bdpsi += -1
                        a0 = a0old
                        a0copy = a0new
                        ccopy = cnew
                        break
                    else:
                        a0 = a0new
                        c = cnew
            if problem:
                break
            # Then grow bdl (MPO bond dimension) by one and repeat, or finish.
            if bdl == bdlmax:
                if bdpsi == bdpsimax:
                    resultm = resultm[0:bdl,0:bdpsi]
                    result = resultm[bdl-1,bdpsi-1]
                else:
                    # The discarded bdpsi+1 candidate is the best available result.
                    a0 = a0copy
                    c = ccopy
                    resultm = resultm[0:bdl,0:bdpsi+1]
                    result = resultm[bdl-1,bdpsi]
                break
            else:
                bdl += 1
                i = 0
                while True:
                    c = fin_enlarge_bdl(c,factorv[i])
                    resultm[bdl-1,bdpsi-1],cnew,a0new = fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision,lherm)
                    if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
                        a0 = a0new
                        c = cnew
                        break
                    i += 1
                    if i == np.size(factorv):
                        problem = True
                        break
                if problem:
                    break
                if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
                    # Converged in bdl; pick the better of the current point and
                    # the stored bdpsi+1 candidate (if one exists).
                    if bdpsi == bdpsimax:
                        resultm = resultm[0:bdl,0:bdpsi]
                        result = resultm[bdl-1,bdpsi-1]
                    else:
                        if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
                            a0 = a0copy
                            c = ccopy
                            resultm = resultm[0:bdl,0:bdpsi+1]
                            bdl += -1
                            bdpsi += 1
                            result = resultm[bdl-1,bdpsi-1]
                        else:
                            resultm = resultm[0:bdl,0:bdpsi+1]
                            result = resultm[bdl-1,bdpsi-1]
                    break
        if not(problem):
            break
    return result,resultm,c,a0
def fin_FoM_optbd(n,d,bc,a,b,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
    """
    Optimization of FoM over SLD MPO together with a convergence check in the
    SLD bond dimension. Function for finite size systems.

    Parameters:
      n: number of sites in TN
      d: dimension of local Hilbert space (dimension of physical index)
      bc: boundary conditions, 'O' for OBC, 'P' for PBC
      a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
      b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
      cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
      imprecision: expected imprecision of the end results, default value is 10**-2
      bdlmax: maximal bond dimension for the SLD MPO, default value is 100
      alwaysbdlmax: if True the maximal bond dimension has to be reached, otherwise False (default value)
      lherm: if True (default value) the Hermitian gauge is imposed on the SLD MPO

    Returns:
      result: optimal value of FoM
      resultv: vector of FoM values as a function of the SLD bond dimension
      c: optimal MPO for SLD
    """
    # Retry loop: restarted from a fresh initialization whenever no enlargement
    # factor manages to improve the FoM ("problem" flag).
    while True:
        # --- initialize the SLD MPO ---
        if cini is None:
            bdl = 1
            rng = np.random.default_rng()
            if bc == 'O':
                # Random start, symmetrized to be Hermitian in the physical indices.
                c = [0]*n
                c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
                c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
                for site in range(1,n-1):
                    c[site] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
                    c[site] = (c[site] + np.conj(np.moveaxis(c[site],2,3)))/2
                c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
                c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
            elif bc == 'P':
                c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
                c = (c + np.conj(np.moveaxis(c,2,3)))/2
        else:
            c = cini
            if bc == 'O':
                bdl = max(np.shape(c[i])[0] for i in range(n))
                c = [c[i].astype(complex) for i in range(n)]
            elif bc == 'P':
                bdl = np.shape(c)[0]
                c = c.astype(complex)
        # resultv[bdl-1] records the optimized FoM at each bond dimension tried.
        resultv = np.zeros(bdlmax,dtype=float)
        if bc == 'O':
            resultv[bdl-1],c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
            if n == 1:
                # Single-site OBC: bond dimension is fixed at 1, done immediately.
                resultv = resultv[:bdl]
                return resultv[bdl-1],resultv,c
        elif bc == 'P':
            resultv[bdl-1],c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
        # Scaling factors for the random enlargement noise, tried in this order.
        factorv = np.array([0.5,0.25,0.1,1,0.01])
        problem = False
        # Grow bdl one step at a time until the relative gain falls below
        # `imprecision` (unless alwaysbdlmax forces reaching bdlmax).
        while True:
            if bdl == bdlmax:
                resultv = resultv[:bdl]
                result = resultv[bdl-1]
                break
            bdl += 1
            attempt = 0
            while True:
                c = fin_enlarge_bdl(c,factorv[attempt])
                if bc == 'O':
                    resultv[bdl-1],cnew = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
                elif bc == 'P':
                    resultv[bdl-1],cnew = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
                if resultv[bdl-1] >= resultv[bdl-2]:
                    # Enlargement accepted.
                    c = cnew
                    break
                attempt += 1
                if attempt == np.size(factorv):
                    # No factor improved the FoM; trigger a full restart.
                    problem = True
                    break
            if problem:
                break
            if not alwaysbdlmax and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
                # Converged: the last enlargement brought no significant gain.
                resultv = resultv[:bdl]
                result = resultv[bdl-1]
                break
        if not problem:
            break
    return result,resultv,c
def fin_FoMD_optbd(n,d,bc,c2d,cpd,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
    """
    Optimization of FoMD over initial wave function MPS and also check of convergence in bond dimension. Function for finite size systems.

    Parameters:
      n: number of sites in TN
      d: dimension of local Hilbert space (dimension of physical index)
      bc: boundary conditions, 'O' for OBC, 'P' for PBC
      c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
      cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
      a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
      imprecision: expected imprecision of the end results, default value is 10**-2
      bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
      alwaysbdpsimax: boolean value, True if maximal value of bd for initial wave function MPS have to be reached, otherwise False (default value)

    Returns:
      result: optimal value of FoMD
      resultv: vector describing FoMD in function of bd of initial wave function MPS
      a0: optimal MPS for initial wave function
    """
    # Retry loop: restarted from a fresh initialization whenever no enlargement
    # factor manages to improve the FoMD ("problem" flag).
    while True:
        # --- initialize the wave-function MPS ---
        if a0ini is None:
            bdpsi = 1
            a0 = np.zeros(d,dtype=complex)
            # Default product-state amplitudes with binomial weights.
            for i in range(d):
                a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
                # a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
            if bc == 'O':
                a0 = a0[np.newaxis,np.newaxis,:]
                a0 = [a0]*n
            elif bc == 'P':
                a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
                a0 = np.tile(a0,(1,1,1,n))
        else:
            a0 = a0ini
            if bc == 'O':
                bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
                a0 = [a0[i].astype(complex) for i in range(n)]
            elif bc == 'P':
                bdpsi = np.shape(a0)[0]
                a0 = a0.astype(complex)
        # resultv[bdpsi-1] records the optimized FoMD at each bond dimension tried.
        resultv = np.zeros(bdpsimax,dtype=float)
        if bc == 'O':
            resultv[bdpsi-1],a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
            if n == 1:
                # Single-site OBC: bond dimension is fixed at 1, done immediately.
                resultv = resultv[0:bdpsi]
                result = resultv[bdpsi-1]
                return result,resultv,a0
        elif bc == 'P':
            resultv[bdpsi-1],a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
        # Scaling factors for the random enlargement noise, tried in this order.
        factorv = np.array([0.5,0.25,0.1,1,0.01])
        problem = False
        # Grow bdpsi one step at a time until the relative gain falls below
        # `imprecision` (unless alwaysbdpsimax forces reaching bdpsimax).
        while True:
            if bdpsi == bdpsimax:
                resultv = resultv[0:bdpsi]
                result = resultv[bdpsi-1]
                break
            else:
                bdpsi += 1
                i = 0
                while True:
                    a0 = fin_enlarge_bdpsi(a0,factorv[i])
                    if bc == 'O':
                        resultv[bdpsi-1],a0new = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
                    elif bc == 'P':
                        resultv[bdpsi-1],a0new = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
                    if resultv[bdpsi-1] >= resultv[bdpsi-2]:
                        # Enlargement accepted.
                        a0 = a0new
                        break
                    i += 1
                    if i == np.size(factorv):
                        # No factor improved the FoMD; trigger a full restart.
                        problem = True
                        break
                if problem:
                    break
                if not(alwaysbdpsimax) and resultv[bdpsi-1] < (1+imprecision)*resultv[bdpsi-2]:
                    # Converged: the last enlargement brought no significant gain.
                    resultv = resultv[0:bdpsi]
                    result = resultv[bdpsi-1]
                    break
        if not(problem):
            break
    return result,resultv,a0
def fin_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,imprecision=10**-2,lherm=True):
    """
    Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS. Function for finite size systems.

    Parameters:
      n: number of sites in TN
      d: dimension of local Hilbert space (dimension of physical index)
      bc: boundary conditions, 'O' for OBC, 'P' for PBC
      c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
      a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
      ch: MPO for quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
      chp: MPO for generalized derivative of quantum channel, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
      imprecision: expected imprecision of the end results, default value is 10**-2
      lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False

    Returns:
      fval: optimal value of FoM/FoMD
      c: optimal MPO for SLD
      a0: optimal MPS for initial wave function
    """
    # Convergence threshold for the relative scatter of recent FoM/FoMD values.
    relunc_f = 0.1*imprecision
    # Precompute the dual (conjugate, physically transposed) channel MPOs.
    if bc == 'O':
        chd = [0]*n
        chpd = [0]*n
        for x in range(n):
            chd[x] = np.conj(np.moveaxis(ch[x],2,3))
            chpd[x] = np.conj(np.moveaxis(chp[x],2,3))
    elif bc == 'P':
        chd = np.conj(np.moveaxis(ch,2,3))
        chpd = np.conj(np.moveaxis(chp,2,3))
    f = np.array([])
    iter_f = 0
    # See-saw iteration: optimize FoM over the SLD at fixed input state, then
    # FoMD over the input state at fixed SLD, until the values stabilize.
    while True:
        # Output state (and its derivative) of the channel for the current input.
        a0_dm = wave_function_to_density_matrix(a0)
        a = channel_acting_on_operator(ch,a0_dm)
        b = channel_acting_on_operator(chp,a0_dm)
        if bc == 'O':
            fom,c = fin_FoM_OBC_optm(a,b,c,imprecision,lherm)
        elif bc == 'P':
            fom,c = fin_FoM_PBC_optm(a,b,c,imprecision,lherm)
        f = np.append(f,fom)
        # Stop once the last four recorded values (two FoM, two FoMD) agree to
        # within relunc_f in relative terms.
        if iter_f >= 2 and np.std(f[-4:])/np.mean(f[-4:]) <= relunc_f:
            break
        # Build the MPO of the squared SLD (site-wise Kronecker contraction over
        # the shared physical index).
        if bc == 'O':
            c2 = [0]*n
            for x in range(n):
                bdl1 = np.shape(c[x])[0]
                bdl2 = np.shape(c[x])[1]
                c2[x] = np.zeros((bdl1**2,bdl2**2,d,d),dtype=complex)
                for nx in range(d):
                    for nxp in range(d):
                        for nxpp in range(d):
                            c2[x][:,:,nx,nxp] = c2[x][:,:,nx,nxp]+np.kron(c[x][:,:,nx,nxpp],c[x][:,:,nxpp,nxp])
        elif bc == 'P':
            bdl = np.shape(c)[0]
            c2 = np.zeros((bdl**2,bdl**2,d,d,n),dtype=complex)
            for nx in range(d):
                for nxp in range(d):
                    for nxpp in range(d):
                        for x in range(n):
                            c2[:,:,nx,nxp,x] = c2[:,:,nx,nxp,x]+np.kron(c[:,:,nx,nxpp,x],c[:,:,nxpp,nxp,x])
        # Pull the SLD (and its square) back through the dual channel, then
        # optimize FoMD over the input state.
        c2d = channel_acting_on_operator(chd,c2)
        cpd = channel_acting_on_operator(chpd,c)
        if bc == 'O':
            fomd,a0 = fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision)
        elif bc == 'P':
            fomd,a0 = fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision)
        f = np.append(f,fomd)
        iter_f += 1
    fval = f[-1]
    return fval,c,a0
def fin_FoM_OBC_optm(a,b,c,imprecision=10**-2,lherm=True):
    """
    Optimization of FoM over MPO for SLD. Function for finite size systems with OBC.

    The FoM is the quadratic functional 2*Tr(rho' L) - Tr(rho L^2); each site
    tensor of the SLD MPO c is updated in turn by solving the corresponding
    linear stationarity condition with a pseudoinverse.

    Parameters:
      a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
      b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
      c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
      imprecision: expected imprecision of the end results, default value is 10**-2
      lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False

    Returns:
      fomval: optimal value of FoM
      c: optimal MPO for SLD
    """
    n = len(c)
    # Pseudoinverse cutoff, tightened with system size.
    tol_fom = 0.1*imprecision/n**2
    if n == 1:
        # Single site: the stationarity condition is solved in closed form.
        if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
            d = np.shape(c[0])[2]
            # l1 = linear part (from rho'), l2 = quadratic part (from rho).
            tensors = [b[0][0,0,:,:]]
            legs = [[-2,-1]]
            l1 = ncon(tensors,legs)
            l1 = np.reshape(l1,-1,order='F')
            tensors = [a[0][0,0,:,:],np.eye(d)]
            legs = [[-2,-3],[-4,-1]]
            l2 = ncon(tensors,legs)
            l2 = np.reshape(l2,(d*d,d*d),order='F')
            dl2 = l2+l2.T
            dl1 = 2*l1
            # Solve dl2 @ cv = dl1 via a (symmetrized) pseudoinverse.
            dl2pinv = np.linalg.pinv(dl2,tol_fom)
            dl2pinv = (dl2pinv+dl2pinv.T)/2
            cv = dl2pinv @ dl1
            c[0][0,0,:,:] = np.reshape(cv,(d,d),order='F')
            if lherm:
                c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
                cv = np.reshape(c[0],-1,order='F')
            fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
        else:
            # NOTE(review): when this warning fires, fomval is never assigned and
            # the return below raises UnboundLocalError — consider raising here.
            warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
    else:
        relunc_fom = 0.1*imprecision
        # l1f/l2f: right environments of the linear/quadratic terms; l1c/l2c
        # are the corresponding left environments built up during the sweep.
        l1f = [0]*n
        l2f = [0]*n
        fom = np.array([])
        iter_fom = 0
        while True:
            # Build all right environments starting from the last site.
            tensors = [c[n-1],b[n-1]]
            legs = [[-1,-3,1,2],[-2,-4,2,1]]
            l1f[n-2] = ncon(tensors,legs)
            l1f[n-2] = l1f[n-2][:,:,0,0]
            tensors = [c[n-1],a[n-1],c[n-1]]
            legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
            l2f[n-2] = ncon(tensors,legs)
            l2f[n-2] = l2f[n-2][:,:,:,0,0,0]
            for x in range(n-2,0,-1):
                tensors = [c[x],b[x],l1f[x]]
                legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
                l1f[x-1] = ncon(tensors,legs)
                tensors = [c[x],a[x],c[x],l2f[x]]
                legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
                l2f[x-1] = ncon(tensors,legs)
            # --- update site 0 ---
            bdl1,bdl2,d,d = np.shape(c[0])
            tensors = [b[0],l1f[0]]
            legs = [[-5,1,-4,-3],[-2,1]]
            l1 = ncon(tensors,legs)
            l1 = np.reshape(l1,-1,order='F')
            tensors = [a[0],np.eye(d),l2f[0]]
            legs = [[-9,1,-4,-7],[-8,-3],[-2,1,-6]]
            l2 = ncon(tensors,legs)
            l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
            dl2 = l2+l2.T
            dl1 = 2*l1
            dl2pinv = np.linalg.pinv(dl2,tol_fom)
            dl2pinv = (dl2pinv+dl2pinv.T)/2
            cv = dl2pinv @ dl1
            c[0] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
            if lherm:
                c[0] = (c[0]+np.conj(np.moveaxis(c[0],2,3)))/2
                cv = np.reshape(c[0],-1,order='F')
            fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
            # Start the left environments with the freshly updated site 0.
            tensors = [c[0],b[0]]
            legs = [[-3,-1,1,2],[-4,-2,2,1]]
            l1c = ncon(tensors,legs)
            l1c = l1c[:,:,0,0]
            tensors = [c[0],a[0],c[0]]
            legs = [[-4,-1,1,2],[-5,-2,2,3],[-6,-3,3,1]]
            l2c = ncon(tensors,legs)
            l2c = l2c[:,:,:,0,0,0]
            # --- sweep left to right over the bulk sites ---
            for x in range(1,n-1):
                bdl1,bdl2,d,d = np.shape(c[x])
                tensors = [l1c,b[x],l1f[x]]
                legs = [[-1,1],[1,2,-4,-3],[-2,2]]
                l1 = ncon(tensors,legs)
                l1 = np.reshape(l1,-1,order='F')
                tensors = [l2c,a[x],np.eye(d),l2f[x]]
                legs = [[-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6]]
                l2 = ncon(tensors,legs)
                l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
                dl2 = l2+l2.T
                dl1 = 2*l1
                dl2pinv = np.linalg.pinv(dl2,tol_fom)
                dl2pinv = (dl2pinv+dl2pinv.T)/2
                cv = dl2pinv @ dl1
                c[x] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
                if lherm:
                    c[x] = (c[x]+np.conj(np.moveaxis(c[x],2,3)))/2
                    cv = np.reshape(c[x],-1,order='F')
                fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
                # Absorb the updated site into the left environments.
                tensors = [l1c,c[x],b[x]]
                legs = [[3,4],[3,-1,1,2],[4,-2,2,1]]
                l1c = ncon(tensors,legs)
                tensors = [l2c,c[x],a[x],c[x]]
                legs = [[4,5,6],[4,-1,1,2],[5,-2,2,3],[6,-3,3,1]]
                l2c = ncon(tensors,legs)
            # --- update the last site ---
            bdl1,bdl2,d,d = np.shape(c[n-1])
            tensors = [l1c,b[n-1]]
            legs = [[-1,1],[1,-5,-4,-3]]
            l1 = ncon(tensors,legs)
            l1 = np.reshape(l1,-1,order='F')
            tensors = [l2c,a[n-1],np.eye(d)]
            legs = [[-1,1,-5],[1,-9,-4,-7],[-8,-3]]
            l2 = ncon(tensors,legs)
            l2 = np.reshape(l2,(bdl1*bdl2*d*d,bdl1*bdl2*d*d),order='F')
            dl2 = l2+l2.T
            dl1 = 2*l1
            dl2pinv = np.linalg.pinv(dl2,tol_fom)
            dl2pinv = (dl2pinv+dl2pinv.T)/2
            cv = dl2pinv @ dl1
            c[n-1] = np.reshape(cv,(bdl1,bdl2,d,d),order='F')
            if lherm:
                c[n-1] = (c[n-1]+np.conj(np.moveaxis(c[n-1],2,3)))/2
                cv = np.reshape(c[n-1],-1,order='F')
            fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
            iter_fom += 1
            # Converged when the last two sweeps' values are positive and their
            # relative scatter is below relunc_fom.
            if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
                break
        fomval = fom[-1]
    return fomval,c
def fin_FoM_PBC_optm(a,b,c,imprecision=10**-2,lherm=True):
    """
    Optimization of FoM over MPO for SLD. Function for finite size systems with PBC.

    Same scheme as the OBC version: each site tensor of c is updated by solving
    the linear stationarity condition of 2*Tr(rho' L) - Tr(rho L^2); with PBC
    the environments carry an extra pair of bond indices for the closed loop.

    Parameters:
      a: MPO for density matrix, expected ndarray of a shape (bd,bd,d,d,n)
      b: MPO for generalized derivative of density matrix, expected ndarray of a shape (bd,bd,d,d,n)
      c: MPO for SLD, expected ndarray of a shape (bd,bd,d,d,n)
      imprecision: expected imprecision of the end results, default value is 10**-2
      lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False

    Returns:
      fomval: optimal value of FoM
      c: optimal MPO for SLD
    """
    n = np.shape(a)[4]
    d = np.shape(a)[2]
    bdr = np.shape(a)[0]
    bdrp = np.shape(b)[0]
    bdl = np.shape(c)[0]
    # Pseudoinverse cutoff, tightened with system size.
    tol_fom = 0.1*imprecision/n**2
    if n == 1:
        # Single site: solve the stationarity condition in closed form
        # (the identity matrices close the periodic bond loop).
        tensors = [b[:,:,:,:,0],np.eye(bdl)]
        legs = [[1,1,-4,-3],[-2,-1]]
        l1 = ncon(tensors,legs)
        l1 = np.reshape(l1,-1,order='F')
        tensors = [a[:,:,:,:,0],np.eye(d),np.eye(bdl),np.eye(bdl)]
        legs = [[1,1,-4,-7],[-8,-3],[-2,-1],[-6,-5]]
        l2 = ncon(tensors,legs)
        l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
        dl2 = l2+l2.T
        dl1 = 2*l1
        dl2pinv = np.linalg.pinv(dl2,tol_fom)
        dl2pinv = (dl2pinv+dl2pinv.T)/2
        cv = dl2pinv @ dl1
        c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
        if lherm:
            c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
            cv = np.reshape(c[:,:,:,:,0],-1,order='F')
        fomval = np.real(2*cv @ l1 - cv @ l2 @ cv)
    else:
        relunc_fom = 0.1*imprecision
        # l1f/l2f: right environments of the linear/quadratic terms, with open
        # bond indices on both ends because of the periodic boundary.
        l1f = np.zeros((bdl,bdrp,bdl,bdrp,n-1),dtype=complex)
        l2f = np.zeros((bdl,bdr,bdl,bdl,bdr,bdl,n-1),dtype=complex)
        fom = np.array([])
        iter_fom = 0
        while True:
            # Build all right environments starting from the last site.
            tensors = [c[:,:,:,:,n-1],b[:,:,:,:,n-1]]
            legs = [[-1,-3,1,2],[-2,-4,2,1]]
            l1f[:,:,:,:,n-2] = ncon(tensors,legs)
            tensors = [c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]]
            legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
            l2f[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
            for x in range(n-2,0,-1):
                tensors = [c[:,:,:,:,x],b[:,:,:,:,x],l1f[:,:,:,:,x]]
                legs = [[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]]
                l1f[:,:,:,:,x-1] = ncon(tensors,legs)
                tensors = [c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x],l2f[:,:,:,:,:,:,x]]
                legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]]
                l2f[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
            # --- update site 0 ---
            tensors = [b[:,:,:,:,0],l1f[:,:,:,:,0]]
            legs = [[2,1,-4,-3],[-2,1,-1,2]]
            l1 = ncon(tensors,legs)
            l1 = np.reshape(l1,-1,order='F')
            tensors = [a[:,:,:,:,0],np.eye(d),l2f[:,:,:,:,:,:,0]]
            legs = [[2,1,-4,-7],[-8,-3],[-2,1,-6,-1,2,-5]]
            l2 = ncon(tensors,legs)
            l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
            dl2 = l2+l2.T
            dl1 = 2*l1
            dl2pinv = np.linalg.pinv(dl2,tol_fom)
            dl2pinv = (dl2pinv+dl2pinv.T)/2
            cv = dl2pinv @ dl1
            c[:,:,:,:,0] = np.reshape(cv,(bdl,bdl,d,d),order='F')
            if lherm:
                c[:,:,:,:,0] = (c[:,:,:,:,0]+np.conj(np.moveaxis(c[:,:,:,:,0],2,3)))/2
                cv = np.reshape(c[:,:,:,:,0],-1,order='F')
            fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
            # Start the left environments with the freshly updated site 0.
            tensors = [c[:,:,:,:,0],b[:,:,:,:,0]]
            legs = [[-1,-3,1,2],[-2,-4,2,1]]
            l1c = ncon(tensors,legs)
            tensors = [c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]]
            legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
            l2c = ncon(tensors,legs)
            # --- sweep left to right over the bulk sites ---
            for x in range(1,n-1):
                tensors = [l1c,b[:,:,:,:,x],l1f[:,:,:,:,x]]
                legs = [[3,4,-1,1],[1,2,-4,-3],[-2,2,3,4]]
                l1 = ncon(tensors,legs)
                l1 = np.reshape(l1,-1,order='F')
                tensors = [l2c,a[:,:,:,:,x],np.eye(d),l2f[:,:,:,:,:,:,x]]
                legs = [[3,4,5,-1,1,-5],[1,2,-4,-7],[-8,-3],[-2,2,-6,3,4,5]]
                l2 = ncon(tensors,legs)
                l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
                dl2 = l2+l2.T
                dl1 = 2*l1
                dl2pinv = np.linalg.pinv(dl2,tol_fom)
                dl2pinv = (dl2pinv+dl2pinv.T)/2
                cv = dl2pinv @ dl1
                c[:,:,:,:,x] = np.reshape(cv,(bdl,bdl,d,d),order='F')
                if lherm:
                    c[:,:,:,:,x] = (c[:,:,:,:,x]+np.conj(np.moveaxis(c[:,:,:,:,x],2,3)))/2
                    cv = np.reshape(c[:,:,:,:,x],-1,order='F')
                fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
                # Absorb the updated site into the left environments.
                tensors = [l1c,c[:,:,:,:,x],b[:,:,:,:,x]]
                legs = [[-1,-2,3,4],[3,-3,1,2],[4,-4,2,1]]
                l1c = ncon(tensors,legs)
                tensors = [l2c,c[:,:,:,:,x],a[:,:,:,:,x],c[:,:,:,:,x]]
                legs = [[-1,-2,-3,4,5,6],[4,-4,1,2],[5,-5,2,3],[6,-6,3,1]]
                l2c = ncon(tensors,legs)
            # --- update the last site ---
            tensors = [l1c,b[:,:,:,:,n-1]]
            legs = [[-2,2,-1,1],[1,2,-4,-3]]
            l1 = ncon(tensors,legs)
            l1 = np.reshape(l1,-1,order='F')
            tensors = [l2c,a[:,:,:,:,n-1],np.eye(d)]
            legs = [[-2,2,-6,-1,1,-5],[1,2,-4,-7],[-8,-3]]
            l2 = ncon(tensors,legs)
            l2 = np.reshape(l2,(bdl*bdl*d*d,bdl*bdl*d*d),order='F')
            dl2 = l2+l2.T
            dl1 = 2*l1
            dl2pinv = np.linalg.pinv(dl2,tol_fom)
            dl2pinv = (dl2pinv+dl2pinv.T)/2
            cv = dl2pinv @ dl1
            c[:,:,:,:,n-1] = np.reshape(cv,(bdl,bdl,d,d),order='F')
            if lherm:
                c[:,:,:,:,n-1] = (c[:,:,:,:,n-1]+np.conj(np.moveaxis(c[:,:,:,:,n-1],2,3)))/2
                cv = np.reshape(c[:,:,:,:,n-1],-1,order='F')
            fom = np.append(fom,np.real(2*cv @ l1 - cv @ l2 @ cv))
            iter_fom += 1
            # Converged when the last two sweeps' values are positive and their
            # relative scatter is below relunc_fom.
            if iter_fom >= 2 and all(fom[-2*n:] > 0) and np.std(fom[-2*n:])/np.mean(fom[-2*n:]) <= relunc_fom:
                break
        fomval = fom[-1]
    return fomval,c
def fin_FoMD_OBC_optm(c2d,cpd,a0,imprecision=10**-2):
    """
    Optimization of FoMD over MPS for initial wave function. Function for finite size systems with OBC.

    The FoMD is the Rayleigh-type functional <psi|2*Lp' - L'^2|psi>; each site
    tensor of a0 is updated by the dominant eigenvector of the local effective
    operator, followed by an SVD to shift the orthogonality center.

    Parameters:
      c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
      cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
      a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
      imprecision: expected imprecision of the end results, default value is 10**-2

    Returns:
      fomdval: optimal value of FoMD
      a0: optimal MPS for initial wave function
    """
    n = len(a0)
    if n == 1:
        # Single site: solve the eigenproblem of 2*lpd - l2d in closed form.
        if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
            d = np.shape(a0[0])[2]
            tensors = [c2d[0][0,0,:,:]]
            legs = [[-1,-2]]
            l2d = ncon(tensors,legs)
            l2d = np.reshape(l2d,(d,d),order='F')
            tensors = [cpd[0][0,0,:,:]]
            legs = [[-1,-2]]
            lpd = ncon(tensors,legs)
            lpd = np.reshape(lpd,(d,d),order='F')
            eiginput = 2*lpd-l2d
            eiginput = (eiginput+np.conj(eiginput).T)/2
            fomdval,a0v = np.linalg.eig(eiginput)
            # Keep the eigenvector of the largest (real) eigenvalue, normalized.
            position = np.argmax(np.real(fomdval))
            a0v = np.reshape(a0v[:,position],-1,order='F')
            a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
            a0[0][0,0,:] = np.reshape(a0v,(d),order='F')
            fomdval = np.real(fomdval[position])
        else:
            # NOTE(review): when this warning fires, fomdval is never assigned and
            # the return below raises UnboundLocalError — consider raising here.
            warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
    else:
        relunc_fomd = 0.1*imprecision
        # l2df/lpdf: right environments for the quadratic/linear MPO terms;
        # l2dc/lpdc are the matching left environments built during the sweep.
        l2df = [0]*n
        lpdf = [0]*n
        fomd = np.array([])
        iter_fomd = 0
        while True:
            # Build all right environments starting from the last site.
            tensors = [np.conj(a0[n-1]),c2d[n-1],a0[n-1]]
            legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
            l2df[n-2] = ncon(tensors,legs)
            l2df[n-2] = l2df[n-2][:,:,:,0,0,0]
            tensors = [np.conj(a0[n-1]),cpd[n-1],a0[n-1]]
            legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
            lpdf[n-2] = ncon(tensors,legs)
            lpdf[n-2] = lpdf[n-2][:,:,:,0,0,0]
            for x in range(n-2,0,-1):
                tensors = [np.conj(a0[x]),c2d[x],a0[x],l2df[x]]
                legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
                l2df[x-1] = ncon(tensors,legs)
                tensors = [np.conj(a0[x]),cpd[x],a0[x],lpdf[x]]
                legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
                lpdf[x-1] = ncon(tensors,legs)
            # --- update site 0 via the local eigenproblem ---
            bdpsi1,bdpsi2,d = np.shape(a0[0])
            tensors = [c2d[0],l2df[0]]
            legs = [[-7,1,-3,-6],[-2,1,-5]]
            l2d = ncon(tensors,legs)
            l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
            tensors = [cpd[0],lpdf[0]]
            legs = [[-7,1,-3,-6],[-2,1,-5]]
            lpd = ncon(tensors,legs)
            lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
            eiginput = 2*lpd-l2d
            eiginput = (eiginput+np.conj(eiginput).T)/2
            fomdval,a0v = np.linalg.eig(eiginput)
            position = np.argmax(np.real(fomdval))
            a0v = np.reshape(a0v[:,position],-1,order='F')
            a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
            a0[0] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
            fomd = np.append(fomd,np.real(fomdval[position]))
            # Left-orthogonalize site 0 by SVD; push the weights into site 1.
            a0[0] = np.moveaxis(a0[0],2,0)
            a0[0] = np.reshape(a0[0],(d*bdpsi1,bdpsi2),order='F')
            u,s,vh = np.linalg.svd(a0[0],full_matrices=False)
            a0[0] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
            a0[0] = np.moveaxis(a0[0],0,2)
            tensors = [np.diag(s) @ vh,a0[1]]
            legs = [[-1,1],[1,-2,-3]]
            a0[1] = ncon(tensors,legs)
            # Start the left environments with the freshly updated site 0.
            tensors = [np.conj(a0[0]),c2d[0],a0[0]]
            legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
            l2dc = ncon(tensors,legs)
            l2dc = l2dc[:,:,:,0,0,0]
            tensors = [np.conj(a0[0]),cpd[0],a0[0]]
            legs = [[-4,-1,1],[-5,-2,1,2],[-6,-3,2]]
            lpdc = ncon(tensors,legs)
            lpdc = lpdc[:,:,:,0,0,0]
            # --- sweep left to right over the bulk sites ---
            for x in range(1,n-1):
                bdpsi1,bdpsi2,d = np.shape(a0[x])
                tensors = [l2dc,c2d[x],l2df[x]]
                legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
                l2d = ncon(tensors,legs)
                l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
                tensors = [lpdc,cpd[x],lpdf[x]]
                legs = [[-1,1,-4],[1,2,-3,-6],[-2,2,-5]]
                lpd = ncon(tensors,legs)
                lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
                eiginput = 2*lpd-l2d
                eiginput = (eiginput+np.conj(eiginput).T)/2
                fomdval,a0v = np.linalg.eig(eiginput)
                position = np.argmax(np.real(fomdval))
                a0v = np.reshape(a0v[:,position],-1,order='F')
                a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
                a0[x] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
                fomd = np.append(fomd,np.real(fomdval[position]))
                # Left-orthogonalize and push the weights one site to the right.
                a0[x] = np.moveaxis(a0[x],2,0)
                a0[x] = np.reshape(a0[x],(d*bdpsi1,bdpsi2),order='F')
                u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
                a0[x] = np.reshape(u,(d,bdpsi1,np.shape(s)[0]),order='F')
                a0[x] = np.moveaxis(a0[x],0,2)
                tensors = [np.diag(s) @ vh,a0[x+1]]
                legs = [[-1,1],[1,-2,-3]]
                a0[x+1] = ncon(tensors,legs)
                # Absorb the updated site into the left environments.
                tensors = [l2dc,np.conj(a0[x]),c2d[x],a0[x]]
                legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
                l2dc = ncon(tensors,legs)
                tensors = [lpdc,np.conj(a0[x]),cpd[x],a0[x]]
                legs = [[3,4,5],[3,-1,1],[4,-2,1,2],[5,-3,2]]
                lpdc = ncon(tensors,legs)
            # --- update the last site ---
            bdpsi1,bdpsi2,d = np.shape(a0[n-1])
            tensors = [l2dc,c2d[n-1]]
            legs = [[-1,1,-4],[1,-7,-3,-6]]
            l2d = ncon(tensors,legs)
            l2d = np.reshape(l2d,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
            tensors = [lpdc,cpd[n-1]]
            legs = [[-1,1,-4],[1,-7,-3,-6]]
            lpd = ncon(tensors,legs)
            lpd = np.reshape(lpd,(bdpsi1*bdpsi2*d,bdpsi1*bdpsi2*d),order='F')
            eiginput = 2*lpd-l2d
            eiginput = (eiginput+np.conj(eiginput).T)/2
            fomdval,a0v = np.linalg.eig(eiginput)
            position = np.argmax(np.real(fomdval))
            a0v = np.reshape(a0v[:,position],-1,order='F')
            a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ a0v))
            a0[n-1] = np.reshape(a0v,(bdpsi1,bdpsi2,d),order='F')
            fomd = np.append(fomd,np.real(fomdval[position]))
            iter_fomd += 1
            # Sweep back right to left, restoring right-orthogonality.
            for x in range(n-1,0,-1):
                bdpsi1,bdpsi2,d = np.shape(a0[x])
                a0[x] = np.moveaxis(a0[x],2,1)
                a0[x] = np.reshape(a0[x],(bdpsi1,d*bdpsi2),order='F')
                u,s,vh = np.linalg.svd(a0[x],full_matrices=False)
                a0[x] = np.reshape(vh,(np.shape(s)[0],d,bdpsi2),order='F')
                a0[x] = np.moveaxis(a0[x],1,2)
                tensors = [a0[x-1],u @ np.diag(s)]
                legs = [[-1,1,-3],[1,-2]]
                a0[x-1] = ncon(tensors,legs)
            # Converged when the last two sweeps' values are positive and their
            # relative scatter is below relunc_fomd.
            if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
                break
        fomdval = fomd[-1]
    return fomdval,a0
def fin_FoMD_PBC_optm(c2d,cpd,a0,imprecision=10**-2):
    """
    Optimization of FoMD over MPS for initial wave function. Function for finite size systems with PBC.
    Parameters:
    c2d: MPO for square of dual of SLD, expected ndarray of a shape (bd,bd,d,d,n)
    cpd: MPO for dual of generalized derivative of SLD, expected ndarray of a shape (bd,bd,d,d,n)
    a0: MPS for initial wave function, expected ndarray of a shape (bd,bd,d,n)
    imprecision: expected imprecision of the end results, default value is 10**-2
    Returns:
    fomdval: optimal value of FoMD
    a0: optimal MPS for initial wave function
    """
    # Problem sizes read off the MPO/MPS tensors.
    n = np.shape(c2d)[4]
    d = np.shape(c2d)[2]
    bdl2d = np.shape(c2d)[0]
    bdlpd = np.shape(cpd)[0]
    bdpsi = np.shape(a0)[0]
    # Pseudoinverse cutoff for the (possibly singular) MPS norm matrix.
    tol_fomd = 0.1*imprecision/n**2
    if n == 1:
        # Single-site case: one generalized eigenproblem for 2*lpd - l2d.
        # The identity factors play the role of the trivial PBC environment.
        tensors = [c2d[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
        legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
        l2d = ncon(tensors,legs)
        l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
        tensors = [cpd[:,:,:,:,0],np.eye(bdpsi),np.eye(bdpsi)]
        legs = [[1,1,-3,-6],[-2,-1],[-5,-4]]
        lpd = ncon(tensors,legs)
        lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
        # Gram (norm) matrix of the MPS site tensor; symmetrized, then
        # pseudo-inverted so the eigenproblem below becomes a standard one.
        tensors = [np.eye(bdpsi),np.eye(bdpsi)]
        legs = [[-2,-1],[-4,-3]]
        psinorm = ncon(tensors,legs)
        psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
        psinorm = (psinorm+np.conj(psinorm).T)/2
        psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
        psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
        psinormpinv = np.kron(np.eye(d),psinormpinv)
        eiginput = 2*lpd-l2d
        eiginput = (eiginput+np.conj(eiginput).T)/2
        eiginput = psinormpinv @ eiginput
        # FoMD is the largest (real part of the) eigenvalue; its eigenvector
        # becomes the new site tensor, normalized w.r.t. the psinorm metric.
        fomdval,a0v = np.linalg.eig(eiginput)
        position = np.argmax(np.real(fomdval))
        a0v = np.reshape(a0v[:,position],-1,order='F')
        a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
        a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
        fomdval = np.real(fomdval[position])
    else:
        # Multi-site case: sweep site by site until FoMD stabilizes.
        relunc_fomd = 0.1*imprecision
        # Right environments (covering sites x+1 .. n-1) for l2d, lpd and the norm.
        l2df = np.zeros((bdpsi,bdl2d,bdpsi,bdpsi,bdl2d,bdpsi,n-1),dtype=complex)
        lpdf = np.zeros((bdpsi,bdlpd,bdpsi,bdpsi,bdlpd,bdpsi,n-1),dtype=complex)
        psinormf = np.zeros((bdpsi,bdpsi,bdpsi,bdpsi,n-1),dtype=complex)
        fomd = np.array([])
        iter_fomd = 0
        while True:
            # Rebuild all right environments from the current MPS, starting
            # at the last site and absorbing one site per step.
            tensors = [np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]]
            legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
            l2df[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
            tensors = [np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]]
            legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
            lpdf[:,:,:,:,:,:,n-2] = ncon(tensors,legs)
            tensors = [np.conj(a0[:,:,:,n-1]),a0[:,:,:,n-1]]
            legs = [[-1,-3,1],[-2,-4,1]]
            psinormf[:,:,:,:,n-2] = ncon(tensors,legs)
            for x in range(n-2,0,-1):
                tensors = [np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x],l2df[:,:,:,:,:,:,x]]
                legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
                l2df[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
                tensors = [np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x],lpdf[:,:,:,:,:,:,x]]
                legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
                lpdf[:,:,:,:,:,:,x-1] = ncon(tensors,legs)
                tensors = [np.conj(a0[:,:,:,x]),a0[:,:,:,x],psinormf[:,:,:,:,x]]
                legs = [[-1,2,1],[-2,3,1],[2,3,-3,-4]]
                psinormf[:,:,:,:,x-1] = ncon(tensors,legs)
            # Optimize site 0 against its right environment.
            tensors = [c2d[:,:,:,:,0],l2df[:,:,:,:,:,:,0]]
            legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
            l2d = ncon(tensors,legs)
            l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
            tensors = [cpd[:,:,:,:,0],lpdf[:,:,:,:,:,:,0]]
            legs = [[2,1,-3,-6],[-2,1,-5,-1,2,-4]]
            lpd = ncon(tensors,legs)
            lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
            tensors = [psinormf[:,:,:,:,0]]
            legs = [[-2,-4,-1,-3]]
            psinorm = ncon(tensors,legs)
            psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
            psinorm = (psinorm+np.conj(psinorm).T)/2
            psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
            psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
            psinormpinv = np.kron(np.eye(d),psinormpinv)
            eiginput = 2*lpd-l2d
            eiginput = (eiginput+np.conj(eiginput).T)/2
            eiginput = psinormpinv @ eiginput
            fomdval,a0v = np.linalg.eig(eiginput)
            position = np.argmax(np.real(fomdval))
            a0v = np.reshape(a0v[:,position],-1,order='F')
            a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
            a0[:,:,:,0] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
            fomd = np.append(fomd,np.real(fomdval[position]))
            # Left environments seeded with the freshly optimized site 0.
            tensors = [np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]]
            legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
            l2dc = ncon(tensors,legs)
            tensors = [np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]]
            legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
            lpdc = ncon(tensors,legs)
            tensors = [np.conj(a0[:,:,:,0]),a0[:,:,:,0]]
            legs = [[-1,-3,1],[-2,-4,1]]
            psinormc = ncon(tensors,legs)
            # Sweep the bulk sites left-to-right, combining left and right
            # environments, optimizing each site tensor in turn.
            for x in range(1,n-1):
                tensors = [l2dc,c2d[:,:,:,:,x],l2df[:,:,:,:,:,:,x]]
                legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
                l2d = ncon(tensors,legs)
                l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
                tensors = [lpdc,cpd[:,:,:,:,x],lpdf[:,:,:,:,:,:,x]]
                legs = [[3,4,5,-1,1,-4],[1,2,-3,-6],[-2,2,-5,3,4,5]]
                lpd = ncon(tensors,legs)
                lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
                tensors = [psinormc,psinormf[:,:,:,:,x]]
                legs = [[1,2,-1,-3],[-2,-4,1,2]]
                psinorm = ncon(tensors,legs)
                psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
                psinorm = (psinorm+np.conj(psinorm).T)/2
                psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
                psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
                psinormpinv = np.kron(np.eye(d),psinormpinv)
                eiginput = 2*lpd-l2d
                eiginput = (eiginput+np.conj(eiginput).T)/2
                eiginput = psinormpinv @ eiginput
                fomdval,a0v = np.linalg.eig(eiginput)
                position = np.argmax(np.real(fomdval))
                a0v = np.reshape(a0v[:,position],-1,order='F')
                a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
                a0[:,:,:,x] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
                fomd = np.append(fomd,np.real(fomdval[position]))
                # Absorb the updated site x into the left environments.
                tensors = [l2dc,np.conj(a0[:,:,:,x]),c2d[:,:,:,:,x],a0[:,:,:,x]]
                legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
                l2dc = ncon(tensors,legs)
                tensors = [lpdc,np.conj(a0[:,:,:,x]),cpd[:,:,:,:,x],a0[:,:,:,x]]
                legs = [[-1,-2,-3,3,4,5],[3,-4,1],[4,-5,1,2],[5,-6,2]]
                lpdc = ncon(tensors,legs)
                tensors = [psinormc,np.conj(a0[:,:,:,x]),a0[:,:,:,x]]
                legs = [[-1,-2,2,3],[2,-3,1],[3,-4,1]]
                psinormc = ncon(tensors,legs)
            # Optimize the last site using the accumulated left environments.
            tensors = [l2dc,c2d[:,:,:,:,n-1]]
            legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
            l2d = ncon(tensors,legs)
            l2d = np.reshape(l2d,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
            tensors = [lpdc,cpd[:,:,:,:,n-1]]
            legs = [[-2,2,-5,-1,1,-4],[1,2,-3,-6]]
            lpd = ncon(tensors,legs)
            lpd = np.reshape(lpd,(bdpsi*bdpsi*d,bdpsi*bdpsi*d),order='F')
            tensors = [psinormc]
            legs = [[-2,-4,-1,-3]]
            psinorm = ncon(tensors,legs)
            psinorm = np.reshape(psinorm,(bdpsi*bdpsi,bdpsi*bdpsi),order='F')
            psinorm = (psinorm+np.conj(psinorm).T)/2
            psinormpinv = np.linalg.pinv(psinorm,tol_fomd,hermitian=True)
            psinormpinv = (psinormpinv+np.conj(psinormpinv).T)/2
            psinormpinv = np.kron(np.eye(d),psinormpinv)
            eiginput = 2*lpd-l2d
            eiginput = (eiginput+np.conj(eiginput).T)/2
            eiginput = psinormpinv @ eiginput
            fomdval,a0v = np.linalg.eig(eiginput)
            position = np.argmax(np.real(fomdval))
            a0v = np.reshape(a0v[:,position],-1,order='F')
            a0v = a0v/np.sqrt(np.abs(np.conj(a0v) @ np.kron(np.eye(d),psinorm) @ a0v))
            a0[:,:,:,n-1] = np.reshape(a0v,(bdpsi,bdpsi,d),order='F')
            fomd = np.append(fomd,np.real(fomdval[position]))
            iter_fomd += 1
            # Stop once at least two sweeps produced positive FoMD values whose
            # relative spread over the last 2*n site updates is small enough.
            if iter_fomd >= 2 and all(fomd[-2*n:] > 0) and np.std(fomd[-2*n:])/np.mean(fomd[-2*n:]) <= relunc_fomd:
                break
        fomdval = fomd[-1]
    return fomdval,a0
def fin_FoM_OBC_val(a,b,c):
    """
    Calculate the value of FoM. Function for finite size systems with OBC.
    The FoM is 2*Tr(c*b) - Tr(c*a*c), evaluated by contracting the MPO
    networks site by site from the right boundary.
    Parameters:
    a: MPO for density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
    b: MPO for generalized derivative of density matrix, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
    c: MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
    Returns:
    fomval: value of FoM (None when the n == 1 input has non-trivial bond dimension)
    """
    n = len(c)
    if n == 1:
        if np.shape(a[0])[0] == 1 and np.shape(b[0])[0] == 1 and np.shape(c[0])[0] == 1:
            # l1 = Tr(c b) on the single site (trivial bond indices dropped).
            # Bug fix: was b[0][0:,0,:,:], which produced a 3-dim tensor
            # incompatible with the two-leg contraction below.
            tensors = [c[0][0,0,:,:],b[0][0,0,:,:]]
            legs = [[1,2],[2,1]]
            l1 = ncon(tensors,legs)
            # l2 = Tr(c a c) on the single site.
            # Bug fix: the second and third entries were bare "[0][0,0,:,:]"
            # (a literal list indexed with a tuple -> TypeError); they must be
            # the density matrix a[0] and the SLD c[0].
            tensors = [c[0][0,0,:,:],a[0][0,0,:,:],c[0][0,0,:,:]]
            legs = [[1,2],[2,3],[3,1]]
            l2 = ncon(tensors,legs)
            fomval = 2*l1-l2
        else:
            warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
            fomval = None  # make the failure explicit instead of raising NameError at return
    else:
        # Right boundary site: keep the left bond legs open, drop the trivial
        # right bond indices.
        tensors = [c[n-1],b[n-1]]
        legs = [[-1,-3,1,2],[-2,-4,2,1]]
        l1 = ncon(tensors,legs)
        l1 = l1[:,:,0,0]
        tensors = [c[n-1],a[n-1],c[n-1]]
        legs = [[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]]
        l2 = ncon(tensors,legs)
        l2 = l2[:,:,:,0,0,0]
        # Absorb the bulk sites into the running environments l1 and l2.
        for x in range(n-2,0,-1):
            tensors = [c[x],b[x],l1]
            legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
            l1 = ncon(tensors,legs)
            tensors = [c[x],a[x],c[x],l2]
            legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
            l2 = ncon(tensors,legs)
        # Close the network at the left boundary; the results are scalars.
        tensors = [c[0],b[0],l1]
        legs = [[-1,3,1,2],[-2,4,2,1],[3,4]]
        l1 = ncon(tensors,legs)
        l1 = float(l1)
        tensors = [c[0],a[0],c[0],l2]
        legs = [[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6]]
        l2 = ncon(tensors,legs)
        l2 = float(l2)
        fomval = 2*l1-l2
    return fomval
def fin_FoM_PBC_val(a,b,c):
    """
    Compute the figure of merit FoM = 2*Tr(c*b) - Tr(c*a*c) for a finite
    chain with periodic boundary conditions.
    Parameters:
    a: MPO for a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
    b: MPO for generalized derivative of a density matrix, expected ndarray of a shape (bd,bd,d,d,n)
    c: MPO for the SLD, expected ndarray of a shape (bd,bd,d,d,n)
    Returns:
    fomval: value of FoM
    """
    n = np.shape(a)[4]
    if n == 1:
        # Single site: trace over both the physical and the periodic bond legs.
        term1 = ncon([c[:,:,:,:,0],b[:,:,:,:,0]],[[3,3,1,2],[4,4,2,1]])
        term2 = ncon([c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0]],[[4,4,1,2],[5,5,2,3],[6,6,3,1]])
        return 2*term1-term2
    # Seed the transfer tensors at the last site, keeping the bond legs open.
    term1 = ncon([c[:,:,:,:,n-1],b[:,:,:,:,n-1]],[[-1,-3,1,2],[-2,-4,2,1]])
    term2 = ncon([c[:,:,:,:,n-1],a[:,:,:,:,n-1],c[:,:,:,:,n-1]],[[-1,-4,1,2],[-2,-5,2,3],[-3,-6,3,1]])
    # Absorb the bulk sites one by one into the running environments.
    for site in range(n-2,0,-1):
        term1 = ncon([c[:,:,:,:,site],b[:,:,:,:,site],term1],[[-1,3,1,2],[-2,4,2,1],[3,4,-3,-4]])
        term2 = ncon([c[:,:,:,:,site],a[:,:,:,:,site],c[:,:,:,:,site],term2],[[-1,4,1,2],[-2,5,2,3],[-3,6,3,1],[4,5,6,-4,-5,-6]])
    # Close the periodic ring at site 0 by contracting the remaining bond legs.
    term1 = ncon([c[:,:,:,:,0],b[:,:,:,:,0],term1],[[5,3,1,2],[6,4,2,1],[3,4,5,6]])
    term2 = ncon([c[:,:,:,:,0],a[:,:,:,:,0],c[:,:,:,:,0],term2],[[7,4,1,2],[8,5,2,3],[9,6,3,1],[4,5,6,7,8,9]])
    return 2*term1-term2
def fin_FoMD_OBC_val(c2d,cpd,a0):
    """
    Compute the figure of merit FoMD = 2*<a0|cpd|a0> - <a0|c2d|a0> for a
    finite chain with open boundary conditions.
    Parameters:
    c2d: MPO for square of dual of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
    cpd: MPO for dual of generalized derivative of SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) (bd can vary between sites)
    a0: MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) (bd can vary between sites)
    Returns:
    fomdval: value of FoMD
    """
    n = len(a0)
    if n == 1:
        if np.shape(c2d[0])[0] == 1 and np.shape(cpd[0])[0] == 1 and np.shape(a0[0])[0] == 1:
            # Trivial bond indices dropped: plain vector/matrix contractions.
            vec = a0[0][0,0,:]
            quad = ncon([np.conj(vec),c2d[0][0,0,:,:],vec],[[1],[1,2],[2]])
            lin = ncon([np.conj(vec),cpd[0][0,0,:,:],vec],[[1],[1,2],[2]])
            fomdval = 2*lin-quad
        else:
            warnings.warn('Tensor networks with OBC and length one have to have bond dimension equal to one.')
    else:
        # Seed both environments at the right boundary, dropping the trivial
        # right bond indices.
        edge_legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
        quad = ncon([np.conj(a0[n-1]),c2d[n-1],a0[n-1]],edge_legs)[:,:,:,0,0,0]
        lin = ncon([np.conj(a0[n-1]),cpd[n-1],a0[n-1]],edge_legs)[:,:,:,0,0,0]
        # Absorb the bulk sites into the running environments.
        bulk_legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5]]
        for site in range(n-2,0,-1):
            quad = ncon([np.conj(a0[site]),c2d[site],a0[site],quad],bulk_legs)
            lin = ncon([np.conj(a0[site]),cpd[site],a0[site],lin],bulk_legs)
        # Close the network at the left boundary; the results are scalars.
        quad = float(ncon([np.conj(a0[0]),c2d[0],a0[0],quad],bulk_legs))
        lin = float(ncon([np.conj(a0[0]),cpd[0],a0[0],lin],bulk_legs))
        fomdval = 2*lin-quad
    return fomdval
def fin_FoMD_PBC_val(c2d,cpd,a0):
    """
    Compute the figure of merit FoMD = 2*<a0|cpd|a0> - <a0|c2d|a0> for a
    finite chain with periodic boundary conditions.
    Parameters:
    c2d: MPO for square of dual of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
    cpd: MPO for dual of generalized derivative of the SLD, expected ndarray of a shape (bd,bd,d,d,n)
    a0: MPS for the initial wave function, expected ndarray of a shape (bd,bd,d,n)
    Returns:
    fomdval: value of FoMD
    """
    n = np.shape(c2d)[4]
    if n == 1:
        # Single site: trace the periodic bond legs directly.
        site_legs = [[3,3,1],[4,4,1,2],[5,5,2]]
        quad = ncon([np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0]],site_legs)
        lin = ncon([np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0]],site_legs)
        return 2*lin-quad
    # Seed the transfer tensors at the last site, keeping all bond legs open.
    edge_legs = [[-1,-4,1],[-2,-5,1,2],[-3,-6,2]]
    quad = ncon([np.conj(a0[:,:,:,n-1]),c2d[:,:,:,:,n-1],a0[:,:,:,n-1]],edge_legs)
    lin = ncon([np.conj(a0[:,:,:,n-1]),cpd[:,:,:,:,n-1],a0[:,:,:,n-1]],edge_legs)
    # Absorb the bulk sites into the running environments.
    bulk_legs = [[-1,3,1],[-2,4,1,2],[-3,5,2],[3,4,5,-4,-5,-6]]
    for site in range(n-2,0,-1):
        quad = ncon([np.conj(a0[:,:,:,site]),c2d[:,:,:,:,site],a0[:,:,:,site],quad],bulk_legs)
        lin = ncon([np.conj(a0[:,:,:,site]),cpd[:,:,:,:,site],a0[:,:,:,site],lin],bulk_legs)
    # Close the periodic ring at site 0.
    close_legs = [[6,3,1],[7,4,1,2],[8,5,2],[3,4,5,6,7,8]]
    quad = ncon([np.conj(a0[:,:,:,0]),c2d[:,:,:,:,0],a0[:,:,:,0],quad],close_legs)
    lin = ncon([np.conj(a0[:,:,:,0]),cpd[:,:,:,:,0],a0[:,:,:,0],lin],close_legs)
    return 2*lin-quad
#################################################################
# 1.2.2 Problems with discrete approximation of the derivative. #
#################################################################
def fin2_FoM_FoMD_optbd(n,d,bc,ch,chp,epsilon,cini=None,a0ini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True,bdpsimax=100,alwaysbdpsimax=False):
    """
    Iterative optimization of FoM/FoMD over SLD MPO and initial wave function MPS and also a check of convergence with increasing bond dimensions. Function for finite size systems. Version with two channels separated by epsilon.
    Parameters:
    n: number of sites in TN
    d: dimension of the local Hilbert space (dimension of the physical index)
    bc: boundary conditions, 'O' for OBC, 'P' for PBC
    ch: MPO for a quantum channel at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
    chp: MPO for a quantum channel at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d**2,d**2) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d**2,d**2,n) for PBC
    epsilon: value of a separation between estimated parameters encoded in ch and chp, float
    cini: initial MPO for the SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
    a0ini: initial MPS for the initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
    imprecision: expected imprecision of the end results, default value is 10**-2
    bdlmax: maximal value of bd for SLD MPO, default value is 100
    alwaysbdlmax: boolean value, True if the maximal value of bd for SLD MPO has to be reached, otherwise False (default value)
    lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
    bdpsimax: maximal value of bd for the initial wave function MPS, default value is 100
    alwaysbdpsimax: boolean value, True if the maximal value of bd for initial wave function MPS has to be reached, otherwise False (default value)
    Returns:
    result: optimal value of FoM/FoMD
    resultm: matrix describing FoM/FoMD as a function of bd of respectively SLD MPO [rows] and the initial wave function MPS [columns]
    c: optimal MPO for SLD
    a0: optimal MPS for initial wave function
    """
    # Outer loop restarts the whole search (with a fresh random SLD seed)
    # when no enlargement factor gives a non-decreasing result ("problem").
    while True:
        if a0ini is None:
            # Default initial state: bond-dimension-1 product state with
            # binomial amplitudes (a "prod" ansatz; "sine" left as comment).
            bdpsi = 1
            a0 = np.zeros(d,dtype=complex)
            for i in range(d):
                a0[i] = np.sqrt(math.comb(d-1,i))*2**(-(d-1)/2) # prod
                # a0[i] = np.sqrt(2/(d+1))*np.sin((1+i)*np.pi/(d+1)) # sine
            if bc == 'O':
                a0 = a0[np.newaxis,np.newaxis,:]
                a0 = [a0]*n
            elif bc == 'P':
                a0 = a0[np.newaxis,np.newaxis,:,np.newaxis]
                a0 = np.tile(a0,(1,1,1,n))
        else:
            a0 = a0ini
            if bc == 'O':
                bdpsi = max([np.shape(a0[i])[0] for i in range(n)])
                a0 = [a0[i].astype(complex) for i in range(n)]
            elif bc == 'P':
                bdpsi = np.shape(a0)[0]
                a0 = a0.astype(complex)
        if cini is None:
            # Default SLD seed: random MPO, Hermitian in the physical indices.
            bdl = 1
            rng = np.random.default_rng()
            if bc == 'O':
                c = [0]*n
                c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
                c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
                for x in range(1,n-1):
                    c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
                    c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
                c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
                c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
            elif bc == 'P':
                c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
                c = (c + np.conj(np.moveaxis(c,2,3)))/2
        else:
            c = cini
            if bc == 'O':
                bdl = max([np.shape(c[i])[0] for i in range(n)])
                c = [c[i].astype(complex) for i in range(n)]
            elif bc == 'P':
                bdl = np.shape(c)[0]
                c = c.astype(complex)
        resultm = np.zeros((bdlmax,bdpsimax),dtype=float)
        resultm[bdl-1,bdpsi-1],c,a0 = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
        if bc == 'O' and n == 1:
            # Single OBC site: both bond dimensions are trivially one.
            resultm = resultm[0:bdl,0:bdpsi]
            result = resultm[bdl-1,bdpsi-1]
            return result,resultm,c,a0
        # Scaling factors tried in turn when enlarging a bond dimension; an
        # enlargement is accepted only if the result did not decrease.
        factorv = np.array([0.5,0.25,0.1,1,0.01])
        problem = False
        while True:
            while True:
                # Inner loop: grow bdpsi until convergence or bdpsimax.
                if bdpsi == bdpsimax:
                    break
                else:
                    a0old = a0
                    bdpsi += 1
                    i = 0
                    while True:
                        a0 = fin_enlarge_bdpsi(a0,factorv[i])
                        resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
                        if resultm[bdl-1,bdpsi-1] >= resultm[bdl-1,bdpsi-2]:
                            break
                        i += 1
                        if i == np.size(factorv):
                            problem = True
                            break
                    if problem:
                        break
                    if not(alwaysbdpsimax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-1,bdpsi-2]:
                        # Improvement below threshold: revert to the previous
                        # bdpsi, but keep a copy of the larger ansatz in case
                        # it turns out better than growing bdl later.
                        bdpsi += -1
                        a0 = a0old
                        a0copy = a0new
                        ccopy = cnew
                        break
                    else:
                        a0 = a0new
                        c = cnew
            if problem:
                break
            if bdl == bdlmax:
                # bdl cannot grow any further: pick the best stored corner.
                if bdpsi == bdpsimax:
                    resultm = resultm[0:bdl,0:bdpsi]
                    result = resultm[bdl-1,bdpsi-1]
                else:
                    a0 = a0copy
                    c = ccopy
                    resultm = resultm[0:bdl,0:bdpsi+1]
                    result = resultm[bdl-1,bdpsi]
                break
            else:
                # Grow bdl by one and re-optimize both tensors.
                bdl += 1
                i = 0
                while True:
                    c = fin_enlarge_bdl(c,factorv[i])
                    resultm[bdl-1,bdpsi-1],cnew,a0new = fin2_FoM_FoMD_optm(n,d,bc,c,a0,ch,chp,epsilon,imprecision,lherm)
                    if resultm[bdl-1,bdpsi-1] >= resultm[bdl-2,bdpsi-1]:
                        a0 = a0new
                        c = cnew
                        break
                    i += 1
                    if i == np.size(factorv):
                        problem = True
                        break
                if problem:
                    break
                if not(alwaysbdlmax) and resultm[bdl-1,bdpsi-1] < (1+imprecision)*resultm[bdl-2,bdpsi-1]:
                    # bdl converged; choose whichever candidate corner of the
                    # (bdl, bdpsi) grid gives the larger stored value.
                    if bdpsi == bdpsimax:
                        resultm = resultm[0:bdl,0:bdpsi]
                        result = resultm[bdl-1,bdpsi-1]
                    else:
                        if resultm[bdl-1,bdpsi-1] < resultm[bdl-2,bdpsi]:
                            a0 = a0copy
                            c = ccopy
                            resultm = resultm[0:bdl,0:bdpsi+1]
                            bdl += -1
                            bdpsi += 1
                            result = resultm[bdl-1,bdpsi-1]
                        else:
                            resultm = resultm[0:bdl,0:bdpsi+1]
                            result = resultm[bdl-1,bdpsi-1]
                    break
        if not(problem):
            break
    return result,resultm,c,a0
def fin2_FoM_optbd(n,d,bc,a,b,epsilon,cini=None,imprecision=10**-2,bdlmax=100,alwaysbdlmax=False,lherm=True):
    """
    Optimization of FoM over SLD MPO and also check of convergence in bond dimension. Function for finite size systems. Version with two states separated by epsilon.
    Parameters:
    n: number of sites in TN
    d: dimension of local Hilbert space (dimension of physical index)
    bc: boundary conditions, 'O' for OBC, 'P' for PBC
    a: MPO for the density matrix at the value of estimated parameter phi=phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
    b: MPO for the density matrix at the value of estimated parameter phi=phi_0+epsilon, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
    epsilon: value of a separation between estimated parameters encoded in a and b, float
    cini: initial MPO for SLD, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
    imprecision: expected imprecision of the end results, default value is 10**-2
    bdlmax: maximal value of bd for SLD MPO, default value is 100
    alwaysbdlmax: boolean value, True if maximal value of bd for SLD MPO have to be reached, otherwise False (default value)
    lherm: boolean value, True (default value) when Hermitian gauge is imposed on SLD MPO, otherwise False
    Returns:
    result: optimal value of FoM
    resultv: vector describing FoM as a function of bd of the SLD MPO
    c: optimal MPO for SLD
    """
    # Outer loop restarts the search (fresh random seed) when no enlargement
    # factor yields a non-decreasing result ("problem").
    while True:
        if cini is None:
            # Default SLD seed: random MPO, Hermitian in the physical indices.
            bdl = 1
            rng = np.random.default_rng()
            if bc == 'O':
                c = [0]*n
                c[0] = (rng.random((1,bdl,d,d)) + 1j*rng.random((1,bdl,d,d)))/bdl
                c[0] = (c[0] + np.conj(np.moveaxis(c[0],2,3)))/2
                for x in range(1,n-1):
                    c[x] = (rng.random((bdl,bdl,d,d)) + 1j*rng.random((bdl,bdl,d,d)))/bdl
                    c[x] = (c[x] + np.conj(np.moveaxis(c[x],2,3)))/2
                c[n-1] = (rng.random((bdl,1,d,d)) + 1j*rng.random((bdl,1,d,d)))/bdl
                c[n-1] = (c[n-1] + np.conj(np.moveaxis(c[n-1],2,3)))/2
            elif bc == 'P':
                c = (rng.random((bdl,bdl,d,d,n)) + 1j*rng.random((bdl,bdl,d,d,n)))/bdl
                c = (c + np.conj(np.moveaxis(c,2,3)))/2
        else:
            c = cini
            if bc == 'O':
                bdl = max([np.shape(c[i])[0] for i in range(n)])
                c = [c[i].astype(complex) for i in range(n)]
            elif bc == 'P':
                bdl = np.shape(c)[0]
                c = c.astype(complex)
        resultv = np.zeros(bdlmax,dtype=float)
        if bc == 'O':
            resultv[bdl-1],c = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
            if n == 1:
                # Single OBC site: bond dimension is fixed to one, nothing to grow.
                resultv = resultv[0:bdl]
                result = resultv[bdl-1]
                return result,resultv,c
        elif bc == 'P':
            resultv[bdl-1],c = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
        # Scaling factors tried in turn when enlarging the bond dimension.
        factorv = np.array([0.5,0.25,0.1,1,0.01])
        problem = False
        while True:
            if bdl == bdlmax:
                resultv = resultv[0:bdl]
                result = resultv[bdl-1]
                break
            else:
                bdl += 1
                i = 0
                while True:
                    # Enlarge bdl with the current factor and re-optimize;
                    # accept only if the result did not decrease.
                    c = fin_enlarge_bdl(c,factorv[i])
                    if bc == 'O':
                        resultv[bdl-1],cnew = fin2_FoM_OBC_optm(a,b,epsilon,c,imprecision,lherm)
                    elif bc == 'P':
                        resultv[bdl-1],cnew = fin2_FoM_PBC_optm(a,b,epsilon,c,imprecision,lherm)
                    if resultv[bdl-1] >= resultv[bdl-2]:
                        c = cnew
                        break
                    i += 1
                    if i == np.size(factorv):
                        problem = True
                        break
                if problem:
                    break
                if not(alwaysbdlmax) and resultv[bdl-1] < (1+imprecision)*resultv[bdl-2]:
                    # Relative gain below the imprecision threshold: converged.
                    resultv = resultv[0:bdl]
                    result = resultv[bdl-1]
                    break
        if not(problem):
            break
    return result,resultv,c
def fin2_FoMD_optbd(n,d,bc,c2d,cd,cpd,epsilon,a0ini=None,imprecision=10**-2,bdpsimax=100,alwaysbdpsimax=False):
"""
Optimization of FoMD over initial wave function MPS and also check of convergence when increasing the bond dimension. Function for finite size systems. Version with two dual SLDs separated by epsilon.
Parameters:
n: number of sites in TN
d: dimension of local Hilbert space (dimension of physical index)
bc: boundary conditions, 'O' for OBC, 'P' for PBC
c2d: MPO for square of dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cd: MPO for dual of SLD at the value of estimated parameter phi=-phi_0, expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
cpd: MPO for dual of SLD at the value of estimated parameter phi=-(phi_0+epsilon), expected list of length n of ndarrays of a shape (bd,bd,d,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,d,n) for PBC
epsilon: value of a separation between estimated parameters encoded in cd and cpd, float
a0ini: initial MPS for initial wave function, expected list of length n of ndarrays of a shape (bd,bd,d) for OBC (bd can vary between sites), or ndarray of a shape (bd,bd,d,n) for PBC
imprecision: expected imprecision of the end results, default value is 10**-2
bdpsimax: maximal value of bd for initial wave function MPS, default value is 100
alwaysbdpsimax: boolean value, True if maximal value of bd for initial wave function MPS have to be reached, otherwise False (default value)
Returns:
result: optimal value of FoMD
resultv: vector describing FoMD in function of bd of initial wave function MPS
a0: optimal MPS for initial wave function
"""
while True:
if a0ini is None:
bdpsi = 1
a0 = | np.zeros(d,dtype=complex) | numpy.zeros |
"""The local histogram is computed using a sliding window similar to the method
described in [1]_.
Input image can be 8-bit or 16-bit with a value < 4096 (i.e. 12 bit), for 16-bit
input images, the number of histogram bins is determined from the maximum value
present in the image.
Result image is 8 or 16-bit with respect to the input image.
References
----------
.. [1] Huang, T.S., Yang, G.J. and Tang, G.Y. "A fast two-dimensional
median filtering algorithm", IEEE Transactions on Acoustics, Speech and
Signal Processing, Feb 1979. Volume: 27 , Issue: 1, Page(s): 13 - 18.
"""
import numpy as np
from skimage import img_as_ubyte, img_as_uint
from skimage.filter.rank import _crank8, _crank16
from skimage.filter.rank.generic import find_bitdepth
__all__ = ['autolevel', 'bottomhat', 'equalize', 'gradient', 'maximum', 'mean',
'meansubtraction', 'median', 'minimum', 'modal', 'morph_contr_enh',
'pop', 'threshold', 'tophat', 'noise_filter', 'entropy', 'otsu']
def _apply(func8, func16, image, selem, out, mask, shift_x, shift_y):
    """Dispatch a rank filter to its 8-bit or 16-bit cython implementation.

    The 8-bit path is taken when ``func8`` exists and either the image is
    8-bit or there is no 16-bit kernel; otherwise the image is promoted to
    uint16 and ``func16`` is used.
    """
    # The structuring element must be a binary uint8 mask for the kernels.
    selem = img_as_ubyte(selem > 0)
    image = np.ascontiguousarray(image)
    # Default mask: process every pixel.
    mask = (np.ones(image.shape, dtype=np.uint8) if mask is None
            else img_as_ubyte(np.ascontiguousarray(mask)))
    if image is out:
        raise NotImplementedError("Cannot perform rank operation in place.")
    use_8bit = func8 is not None and (image.dtype in (np.uint8, np.int8)
                                      or func16 is None)
    if use_8bit:
        return _apply8(func8, image, selem, out, mask, shift_x, shift_y)
    image = img_as_uint(image)
    if out is None:
        out = np.zeros(image.shape, dtype=np.uint16)
    bitdepth = find_bitdepth(image)
    if bitdepth > 11:
        # Values beyond 12 bits are folded down by dropping the low 4 bits.
        image = image >> 4
        bitdepth = find_bitdepth(image)
    func16(image, selem, shift_x=shift_x, shift_y=shift_y, mask=mask,
           bitdepth=bitdepth + 1, out=out)
    return out
def _apply8(func8, image, selem, out, mask, shift_x, shift_y):
if out is None:
out = | np.zeros(image.shape, dtype=np.uint8) | numpy.zeros |
import warnings
import numpy as np
import quaternionic
import pytest
algebra_pyufuncs = type('AlgebraPyufuncs', (object,), dict())()
quaternionic.utilities.pyguvectorize_module_functions(quaternionic.algebra, algebra_pyufuncs)
def test_basis_multiplication():
    """Verify the full Hamilton multiplication table for the quaternion basis."""
    # Basis elements 1, i, j, k as quaternionic arrays.
    one, i, j, k = tuple(quaternionic.array(np.eye(4)))
    basis = (one, i, j, k)
    # Expected product table: entry (r, c) is basis[r] * basis[c].
    expected = {
        (0, 0): one,  (0, 1): i,    (0, 2): j,    (0, 3): k,
        (1, 0): i,    (1, 1): -one, (1, 2): k,    (1, 3): -j,
        (2, 0): j,    (2, 1): -k,   (2, 2): -one, (2, 3): i,
        (3, 0): k,    (3, 1): j,    (3, 2): -i,   (3, 3): -one,
    }
    for (r, c), product in expected.items():
        assert basis[r] * basis[c] == product
    # Hamilton's defining relations.
    assert i * i == -one
    assert j * j == -one
    assert k * k == -one
    assert i * j * k == -one
def test_array_ufunc(array):
np.random.seed(1234)
q = array(np.random.normal(size=(1, 3, 4)))
with pytest.raises(NotImplementedError):
np.exp(q, extra_arg=True)
with pytest.raises(NotImplementedError):
np.negative.at(q, [0, 1])
# Addition
p = array(np.random.normal(size=(17, 3, 4)))
q = array(np.random.normal(size=(1, 3, 4)))
pq1 = np.add(p, q)
assert isinstance(pq1, array)
assert pq1.shape == (17, 3, 4)
assert np.array_equal(np.add(p.ndarray, q.ndarray), pq1.ndarray)
pq2 = array(np.empty((17, 3, 4)))
np.add(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, array)
# Quaternion-scalar multiplication
p = array(np.random.normal(size=(17, 3, 4)))
q = np.random.rand(1, 3)
pq1 = np.multiply(p, q)
assert isinstance(pq1, array)
assert pq1.shape == (17, 3, 4)
pq2 = array(np.empty((17, 3, 4)))
np.multiply(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, array)
pq3 = p * q
assert np.array_equal(pq1, pq3)
assert isinstance(pq3, array)
pq4 = p.copy()
pq4 *= q
assert np.array_equal(pq1, pq4)
assert isinstance(pq4, array)
pq5 = p.copy()
np.multiply(pq5, q, out=pq5)
assert np.array_equal(pq1, pq5)
assert isinstance(pq5, array)
# Scalar-quaternion multiplication
p = np.random.rand(1, 3)
q = array(np.random.normal(size=(17, 3, 4)))
pq1 = np.multiply(p, q)
assert isinstance(pq1, array)
assert pq1.shape == (17, 3, 4)
pq2 = array(np.empty((17, 3, 4)))
np.multiply(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, array)
pq3 = p * q
assert np.array_equal(pq1, pq3)
assert isinstance(pq3, array)
pq4 = q.copy()
pq4 *= p
assert np.array_equal(pq1, pq4)
assert isinstance(pq4, array)
pq5 = q.copy()
np.multiply(p, pq5, out=pq5)
assert np.array_equal(pq1, pq5)
assert isinstance(pq5, array)
# Quaternion-quaternion multiplication
p = array(np.random.normal(size=(17, 3, 4)))
q = array(np.random.normal(size=(17, 3, 4)))
pq1 = np.multiply(p, q)
assert isinstance(pq1, array)
assert pq1.shape == (17, 3, 4)
pq2 = array(np.empty((17, 3, 4)))
np.multiply(p, q, out=pq2)
assert np.array_equal(pq1, pq2)
assert isinstance(pq2, array)
pq3 = p * q
assert np.array_equal(pq1, pq3)
assert isinstance(pq3, array)
pq4 = p.copy()
pq4 *= q
assert np.array_equal(pq1, pq4)
assert isinstance(pq4, array)
pq5 = p.copy()
np.multiply(pq5, q, out=pq5)
assert np.array_equal(pq1, pq5)
assert isinstance(pq5, array)
p = np.random.rand(1, 3)
q = np.random.normal(size=(17, 3, 4))
s = np.random.rand(17, 3)
pq1 = array(q).__array_ufunc__(np.multiply, "__call__", p, q)
assert pq1 == NotImplemented
qneg = array(q).__array_ufunc__(np.negative, "__call__", q)
assert qneg == NotImplemented
qabs = array(q).__array_ufunc__(np.absolute, "__call__", q)
assert qabs == NotImplemented
qs = array(q).__array_ufunc__(np.float_power, "__call__", q, s)
assert qs == NotImplemented
pq1 = array(q).__array_ufunc__(np.equal, "__call__", p, q)
assert pq1 == NotImplemented
qfin = array(q).__array_ufunc__(np.isfinite, "__call__", q)
assert qfin == NotImplemented
q = array(np.random.normal(size=(17, 3, 4)))
qneg = np.negative(q)
assert isinstance(qneg, array)
assert qneg.shape == q.shape
assert np.array_equal(np.negative(q.ndarray), qneg.ndarray)
qneg2 = np.empty(q.shape)
np.negative(q, out=qneg2)
assert np.array_equal(qneg, qneg2)
assert isinstance(qneg2, np.ndarray)
qneg2 = array(np.empty(q.shape))
np.negative(q, out=qneg2.ndarray)
assert np.array_equal(qneg, qneg2)
assert isinstance(qneg2, array)
qneg2 = array(np.empty(q.shape))
np.negative(q, out=qneg2)
assert np.array_equal(qneg, qneg2)
assert isinstance(qneg2, array)
p = np.random.rand(1, 3)
q = array(np.random.normal(size=(17, 3, 4)))
qp1 = np.float_power(q, p)
assert isinstance(qp1, array)
assert qp1.shape == (17, 3, 4)
qp2 = array(np.empty((17, 3, 4)))
np.float_power(q, p, out=qp2)
assert np.array_equal(qp1, qp2)
assert isinstance(qp2, array)
q = array(np.random.normal(size=(17, 3, 4)))
qabs = np.absolute(q)
assert isinstance(qabs, np.ndarray) and not isinstance(qabs, array)
assert qabs.shape == (17, 3)
qabs2 = np.empty((17, 3))
np.absolute(q, out=qabs2)
assert np.array_equal(qabs, qabs2)
assert isinstance(qabs2, np.ndarray) and not isinstance(qabs, array)
q = array(np.random.normal(size=(17, 3, 4, 4)))
qabs = array(np.empty((17, 3, 4)))
np.absolute(q, out=qabs)
assert np.array_equal(qabs, np.absolute(q))
assert isinstance(qabs2, np.ndarray) and isinstance(qabs, array)
p = array( | np.random.normal(size=(17, 3, 4)) | numpy.random.normal |
import numpy as np
import matplotlib.pyplot as pyplot
import scipy.spatial.distance as sd
import sys
import os
import copy
sys.path.append(os.path.dirname(os.getcwd())+"/code_material_python")
from helper import *
from graph_construction.generate_data import *
def build_similarity_graph(X, var=1, eps=0, k=0):
""" Computes the similarity matrix for a given dataset of samples.
Input
X:
(n x m) matrix of m-dimensional samples
k and eps:
controls the main parameter of the graph, the number
of neighbours k for k-nn, and the threshold eps for epsilon graphs
var:
the sigma value for the exponential function, already squared
Output
W:
(n x n) dimensional matrix representing the adjacency matrix of the graph
similarities:
(n x n) dimensional matrix containing
all the similarities between all points (optional output)
"""
assert eps + k != 0, "Choose either epsilon graph or k-nn graph"
if eps:
print("Constructing eps-Graph ...")
else:
print("Constructing k-NN Graph ...")
# euclidean distance squared between points
dists = sd.squareform(sd.pdist(X))**2
similarities = | np.exp(-dists/(2*var)) | numpy.exp |
import astropy.units as u
import numpy as np
from astropy.nddata import StdDevUncertainty
from ..spectra.spectrum1d import Spectrum1D
from ..spectra.spectrum_collection import SpectrumCollection
from ..analysis import template_comparison
from astropy.tests.helper import quantity_allclose
def test_template_match_no_overlap():
    """
    Test template_match when both observed and template spectra have no overlap on the wavelength axis.
    """
    # Seed np.random so that results are consistent.  The values produced
    # depend on the exact order of the np.random calls below -- do not reorder.
    np.random.seed(42)
    # Create test spectra: the template axis (51-102 AA) lies strictly above
    # the observed axis (0-50 AA), so the two spectra share no wavelengths.
    spec_axis = np.linspace(0, 50, 50) * u.AA
    spec_axis_no_overlap = np.linspace(51, 102, 50) * u.AA
    spec = Spectrum1D(spectral_axis=spec_axis,
                      flux=np.random.randn(50) * u.Jy,
                      uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
    spec1 = Spectrum1D(spectral_axis=spec_axis_no_overlap,
                       flux=np.random.randn(50) * u.Jy,
                       uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
    # Get result from template_match
    tm_result = template_comparison.template_match(spec, spec1)
    # Create new spectrum for comparison.  NOTE(review): spec_result is never
    # asserted against (see the commented-out check below); building it still
    # smoke-tests _normalize_for_template_matching on disjoint axes.
    spec_result = Spectrum1D(spectral_axis=spec_axis,
                             flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
    # assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
    # With zero overlap the comparison statistic (presumably chi2 -- confirm
    # against template_comparison.template_match) cannot be computed.
    assert np.isnan(tm_result[3])
def test_template_match_minimal_overlap():
    """
    Test template_match when both observed and template spectra have minimal overlap on the wavelength axis.
    """
    # Seed np.random so that results are consistent
    np.random.seed(42)
    # Create test spectra
    spec_axis = np.linspace(0, 50, 50) * u.AA
    spec_axis_min_overlap = np.linspace(50, 100, 50) * u.AA
    # Pin the last observed sample and the first template sample to the same
    # wavelength so the two axes overlap in exactly one point.
    spec_axis[49] = 51.0 * u.AA
    spec_axis_min_overlap[0] = 51.0 * u.AA
    spec = Spectrum1D(spectral_axis=spec_axis,
                      flux=np.random.randn(50) * u.Jy,
                      uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
    spec1 = Spectrum1D(spectral_axis=spec_axis_min_overlap,
                       flux=np.random.randn(50) * u.Jy,
                       uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
    # Get result from template_match
    tm_result = template_comparison.template_match(spec, spec1)
    # Create new spectrum for comparison.  NOTE(review): spec_result is unused
    # apart from exercising _normalize_for_template_matching.
    spec_result = Spectrum1D(spectral_axis=spec_axis,
                             flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
    # assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
    # TODO: investigate why the all elements in tm_result[1] are NaN even with overlap
    assert np.isnan(tm_result[3])
def test_template_match_spectrum():
    """
    Test template_match when both observed and template spectra have the same wavelength axis.
    """
    # Seed np.random so that results are consistent.  The hard-coded value
    # asserted at the end depends on this seed AND on the exact order of the
    # np.random calls that follow -- do not reorder them.
    np.random.seed(42)
    # Create test spectra
    spec_axis = np.linspace(0, 50, 50) * u.AA
    spec = Spectrum1D(spectral_axis=spec_axis,
                      flux=np.random.randn(50) * u.Jy,
                      uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
    spec1 = Spectrum1D(spectral_axis=spec_axis,
                       flux=np.random.randn(50) * u.Jy,
                       uncertainty=StdDevUncertainty(np.random.sample(50), unit='Jy'))
    # Get result from template_match
    tm_result = template_comparison.template_match(spec, spec1)
    # Create new spectrum for comparison: the matched template flux should
    # equal the normalized template flux when the axes are identical.
    spec_result = Spectrum1D(spectral_axis=spec_axis,
                             flux=spec1.flux * template_comparison._normalize_for_template_matching(spec, spec1))
    assert quantity_allclose(tm_result[0].flux, spec_result.flux, atol=0.01*u.Jy)
    # Regression value for this seed (presumably the chi2 statistic -- confirm
    # against template_comparison.template_match).
    np.testing.assert_almost_equal(tm_result[3], 40093.28353756253)
def test_template_match_with_resample():
"""
Test template_match when both observed and template spectra have different wavelength axis using resampling.
"""
np.random.seed(42)
# Create test spectra
spec_axis1 = np.linspace(0, 50, 50) * u.AA
spec_axis2 = | np.linspace(0, 50, 50) | numpy.linspace |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter_nd."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
GRADIENT_TESTS_DTYPES = (dtypes.float16, dtypes.float32, dtypes.float64)
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _FlatInnerDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape([
functools.reduce(lambda x, y: x * y, shape[:-ndims + 1], 1)
] + shape[-ndims + 1:])
def _FlatOuterDims(tensor, ndims=2):
shape = list(tensor.shape)
return tensor.reshape(shape[:ndims - 1] + [
functools.reduce(lambda x, y: x * y, shape[ndims - 1:], 1)
])
def _NumpyScatterNd(ref, indices, updates, op):
  """NumPy reference implementation of a scatter_nd-style op.

  Args:
    ref: array to scatter into; its trailing dimensions form the "slice".
    indices: integer array whose last dimension indexes into `ref`.
    updates: array of update slices, one per index row.
    op: callable `(previous_slice, update_slice) -> new_slice` defining the
      combination rule (assign, add, sub, ...).

  Returns:
    An array shaped like `ref` with all updates applied.
  """
  # Each row of `indices` addresses one slice; the slice spans the trailing
  # `total_nd - ixdim` dimensions of `ref`.
  ixdim = indices.shape[-1]
  num_updates = indices.size // ixdim
  total_nd = len(ref.shape)
  slice_size = 1
  for i in range(ixdim, total_nd):
    slice_size *= ref.shape[i]
  flat_indices = _FlatInnerDims(indices)
  flat_updates = updates.reshape((num_updates, slice_size))
  output_flat = _FlatOuterDims(ref, ixdim + 1)
  # Updates are applied sequentially in row order, so duplicate indices
  # compose through repeated applications of `op`.
  for ix_updates, ix_output in enumerate(flat_indices):
    ix_output = tuple(ix_output)
    output_flat[ix_output] = op(output_flat[ix_output],
                                flat_updates[ix_updates])
  # NOTE(review): when reshape returns a view, `ref` itself is mutated in the
  # loop above; callers in this file account for that by passing a copy.
  return output_flat.reshape(ref.shape)
def _NumpyUpdate(ref, indices, updates):
  # Reference for scatter_nd_update: the update replaces the previous value.
  return _NumpyScatterNd(ref, indices, updates, lambda p, u: u)
def _NumpyAdd(ref, indices, updates):
  # Reference for scatter_nd_add: updates are accumulated onto the slice.
  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p + u)
def _NumpySub(ref, indices, updates):
  # Reference for scatter_nd_sub: updates are subtracted from the slice.
  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p - u)
def _NumpyMul(ref, indices, updates):
  # Reference for a scatter_nd_mul op (currently disabled; see TODOs below).
  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p * u)
def _NumpyDiv(ref, indices, updates):
  # Reference for a scatter_nd_div op (currently disabled; see TODOs below).
  return _NumpyScatterNd(ref, indices, updates, lambda p, u: p / u)
class StatefulScatterNdTest(test.TestCase):
  """Tests the stateful scatter_nd ops (state_ops.scatter_nd_*) against the
  NumPy reference implementations defined above."""

  def _VariableRankTest(self,
                        np_scatter,
                        tf_scatter,
                        vtype,
                        itype,
                        repeat_indices=False):
    """Compares tf_scatter to np_scatter over several random shape pairs."""
    np.random.seed(8)
    ref_shapes = [(3, 6), (3, 6), (3, 6, 9), (3, 6, 9), (3, 6, 9), (3, 6, 9)]
    indices_shapes = [(2,), (2, 2), (2,), (2, 2), (2, 3), (2, 3, 3)]
    with self.cached_session(use_gpu=True):
      for ref_shape, indices_shape in zip(ref_shapes, indices_shapes):
        num_updates = indices_shape[0]
        ixdim = indices_shape[-1]
        indexable_area_shape = ()
        for i in range(ixdim):
          indexable_area_shape += (ref_shape[i],)
        # Draw distinct index tuples by shuffling every valid coordinate.
        all_indices = [
            list(coord)
            for coord, _ in np.ndenumerate(
                np.empty(indexable_area_shape, vtype))
        ]
        np.random.shuffle(all_indices)
        indices = np.array(all_indices[:num_updates])
        if num_updates > 1 and repeat_indices:
          # Replace the second half of the indices with duplicates of the
          # first half to exercise accumulation on repeated indices.
          indices = indices[:num_updates // 2]
          for _ in range(num_updates - num_updates // 2):
            indices = np.append(
                indices, [indices[np.random.randint(num_updates // 2)]], axis=0)
          np.random.shuffle(indices)
        indices = _AsType(indices[:num_updates], itype)
        updates_shape = (num_updates,)
        for i in range(ixdim, len(ref_shape)):
          updates_shape += (ref_shape[i],)
        updates = _AsType(np.random.randn(*(updates_shape)), vtype)
        ref = _AsType(np.random.randn(*(ref_shape)), vtype)
        # Scatter via numpy
        new = ref.copy()
        np_scatter(new, indices, updates)
        # Scatter via tensorflow
        ref_var = variables.VariableV1(ref)
        ref_var.initializer.run()
        tf_scatter(ref_var, indices, updates).eval()
        # float16 accumulation over repeated indices is less precise.
        tol = 1e-03 if repeat_indices and vtype == np.float16 else 1e-06
        # Compare
        self.assertAllClose(new, self.evaluate(ref_var), atol=tol, rtol=tol)

  def _VariableRankTests(self, np_scatter, tf_scatter):
    """Runs _VariableRankTest over all supported value/index dtype pairs."""
    for vtype in (np.int32, np.float16, np.float32, np.float64, np.complex64,
                  np.complex128):
      for itype in (np.int32, np.int64):
        self._VariableRankTest(np_scatter, tf_scatter, vtype, itype)

  def testSimple(self):
    """scatter_nd_update on a rank-1 variable with scalar updates."""
    indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
    updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
    ref = variables.Variable([0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
    expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
    scatter = state_ops.scatter_nd_update(ref, indices, updates)
    init = variables.global_variables_initializer()
    with self.session(use_gpu=True) as sess:
      self.evaluate(init)
      result = self.evaluate(scatter)
      self.assertAllClose(result, expected)

  @test_util.run_deprecated_v1
  def testSimpleResource(self):
    """Same as testSimple, but against a ResourceVariable."""
    indices = constant_op.constant([[4], [3], [1], [7]], dtype=dtypes.int32)
    updates = constant_op.constant([9, 10, 11, 12], dtype=dtypes.float32)
    ref = resource_variable_ops.ResourceVariable(
        [0, 0, 0, 0, 0, 0, 0, 0], dtype=dtypes.float32)
    expected = np.array([0, 11, 0, 10, 9, 0, 0, 12])
    scatter = state_ops.scatter_nd_update(ref, indices, updates)
    init = variables.global_variables_initializer()
    with self.session(use_gpu=True) as sess:
      self.evaluate(init)
      self.evaluate(scatter)
      self.assertAllClose(ref.eval(), expected)

  def testSimple2(self):
    """Full-row updates on a rank-2 variable."""
    indices = constant_op.constant([[1, 0], [1, 1]], dtype=dtypes.int32)
    updates = constant_op.constant([11., 12.], dtype=dtypes.float32)
    ref = variables.Variable(
        [[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
    expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
    scatter = state_ops.scatter_nd_update(ref, indices, updates)
    init = variables.global_variables_initializer()
    with self.session(use_gpu=True) as sess:
      self.evaluate(init)
      result = self.evaluate(scatter)
      self.assertAllClose(result, expected)

  def testSimple3(self):
    """Slice update: index rank is lower than the variable's rank."""
    indices = constant_op.constant([[1]], dtype=dtypes.int32)
    updates = constant_op.constant([[11., 12.]], dtype=dtypes.float32)
    ref = variables.Variable(
        [[0., 0.], [0., 0.], [0., 0.]], dtype=dtypes.float32)
    expected = np.array([[0., 0.], [11., 12.], [0., 0.]])
    scatter = state_ops.scatter_nd_update(ref, indices, updates)
    init = variables.global_variables_initializer()
    with self.session(use_gpu=True) as sess:
      self.evaluate(init)
      result = self.evaluate(scatter)
      self.assertAllClose(result, expected)

  @test_util.run_deprecated_v1
  def testVariableRankUpdate(self):
    self._VariableRankTests(_NumpyUpdate, state_ops.scatter_nd_update)

  @test_util.run_deprecated_v1
  def testVariableRankAdd(self):
    self._VariableRankTests(_NumpyAdd, state_ops.scatter_nd_add)

  @test_util.run_deprecated_v1
  def testVariableRankSub(self):
    self._VariableRankTests(_NumpySub, state_ops.scatter_nd_sub)

  # TODO(ebrevdo): Re-enable when we need ScatterNdMul.
  # def testVariableRankMul(self):
  #   self._VariableRankTests(_NumpyMul, state_ops.scatter_nd_mul)

  # TODO(ebrevdo): Re-enable when we need ScatterNdDiv.
  # def testVariableRankDiv(self):
  #   self._VariableRankTests(_NumpyDiv, state_ops.scatter_nd_div)

  def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
    """Like _VariableRankTests, but forces duplicated indices."""
    for vtype in (np.int32, np.float16, np.float32, np.float64):
      for itype in (np.int32, np.int64):
        self._VariableRankTest(
            np_scatter, tf_scatter, vtype, itype, repeat_indices=True)

  @test_util.run_v1_only("b/120545219")
  def testScatterRepeatIndices(self):
    """This tests scatter_add using indices that repeat."""
    self._ScatterRepeatIndicesTest(_NumpyAdd, state_ops.scatter_nd_add)
    self._ScatterRepeatIndicesTest(_NumpySub, state_ops.scatter_nd_sub)
    # TODO(ebrevdo): Re-enable when we need ScatterNdMul and ScatterNdDiv.
    # self._ScatterRepeatIndicesTest(_NumpyMul, state_ops.scatter_nd_mul)
    # self._ScatterRepeatIndicesTest(_NumpyDiv, state_ops.scatter_nd_div)

  # TODO(simister): Re-enable once binary size increase due to
  # extra templating is back under control and this op is re-enabled
  # def testBooleanScatterUpdate(self):
  #   with self.session(use_gpu=False) as session:
  #     var = tf.Variable([True, False])
  #     update0 = tf.compat.v1.scatter_nd_update(var, [[1]], [True])
  #     update1 = tf.compat.v1.scatter_nd_update(
  #         var, tf.constant(
  #             [[0]], dtype=tf.int64), [False])
  #     var.initializer.run()
  #     session.run([update0, update1])
  #     self.assertAllEqual([False, True], self.evaluate(var))

  @test_util.run_v1_only("b/120545219")
  def testScatterOutOfRangeCpu(self):
    """CPU kernels must reject indices outside the variable's shape."""
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
               state_ops.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      with self.cached_session(use_gpu=False):
        ref = variables.VariableV1(params)
        ref.initializer.run()
        # Indices all in range, no problem.
        indices = np.array([[2], [0], [5]])
        op(ref, indices, updates).eval()
        # Test some out of range errors.
        indices = np.array([[-1], [0], [5]])
        with self.assertRaisesOpError(
            r"indices\[0\] = \[-1\] does not index into shape \[6\]"):
          op(ref, indices, updates).eval()
        indices = np.array([[2], [0], [6]])
        with self.assertRaisesOpError(
            r"indices\[2\] = \[6\] does not index into shape \[6\]"):
          op(ref, indices, updates).eval()

  def testRank3ValidShape(self):
    """Static shape of the scatter result matches the variable's shape."""
    indices = array_ops.zeros([2, 2, 2], dtypes.int32)
    updates = array_ops.zeros([2, 2, 2], dtypes.int32)
    shape = np.array([2, 2, 2])
    ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
    self.assertAllEqual(
        state_ops.scatter_nd_update(ref, indices,
                                    updates).get_shape().as_list(), shape)

  @test_util.run_v1_only("b/120545219")
  @test_util.disable_xla("b/123337890")  # Error messages differ
  def testResVarInvalidOutputShape(self):
    """Scattering into a scalar resource variable must fail."""
    res = variables.Variable(
        initial_value=lambda: array_ops.zeros(shape=[], dtype=dtypes.float32),
        dtype=dtypes.float32)
    with self.cached_session():
      res.initializer.run()
      with self.assertRaisesOpError("Output must be at least 1-D"):
        state_ops.scatter_nd_update(res, [[0]], [0.22]).eval()

  @test_util.run_deprecated_v1
  def testExtraIndicesDimensions(self):
    """Leading size-1 index dimensions are accepted and broadcast away."""
    indices = array_ops.zeros([1, 1, 2], dtypes.int32)
    updates = array_ops.zeros([1, 1], dtypes.int32)
    shape = np.array([2, 2])
    ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
    scatter_update = state_ops.scatter_nd_update(ref, indices, updates)
    self.assertAllEqual(scatter_update.get_shape().as_list(), shape)
    expected_result = np.zeros([2, 2], dtype=np.int32)
    with self.cached_session():
      ref.initializer.run()
      self.assertAllEqual(expected_result, self.evaluate(scatter_update))

  @test_util.run_deprecated_v1
  def testRank3InvalidShape1(self):
    """Mismatch between outer dims of indices and updates is rejected."""
    indices = array_ops.zeros([3, 2, 2], dtypes.int32)
    updates = array_ops.zeros([2, 2, 2], dtypes.int32)
    shape = np.array([2, 2, 2])
    ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
    with self.assertRaisesWithPredicateMatch(
        ValueError, r"The outer \d+ dimensions of indices\.shape="):
      state_ops.scatter_nd_update(ref, indices, updates)

  @test_util.run_deprecated_v1
  def testRank3InvalidShape2(self):
    """Mismatch between inner dims of the input and updates is rejected."""
    indices = array_ops.zeros([2, 2, 1], dtypes.int32)
    updates = array_ops.zeros([2, 2], dtypes.int32)
    shape = np.array([2, 2, 2])
    ref = variables.Variable(array_ops.zeros(shape, dtypes.int32))
    with self.assertRaisesWithPredicateMatch(
        ValueError, r"The inner \d+ dimensions of input\.shape="):
      state_ops.scatter_nd_update(ref, indices, updates)

  @test_util.run_deprecated_v1
  def testConcurrentUpdates(self):
    """Repeated indices in one scatter_nd_add must accumulate all updates."""
    num_updates = 10000
    update_values = np.random.rand(num_updates)
    ref = variables.Variable(np.zeros([2, 2]), dtype=dtypes.float64)
    indices = constant_op.constant([[0, 1]] * num_updates, dtype=dtypes.int32)
    updates = constant_op.constant(update_values, dtype=dtypes.float64)
    expected_result = np.zeros([2, 2], dtype=np.float64)
    expected_result[0, 1] = np.sum(update_values)
    scatter = state_ops.scatter_nd_add(ref, indices, updates)
    init = variables.global_variables_initializer()
    with session.Session() as sess:
      self.evaluate(init)
      result = self.evaluate(scatter)
      assert np.allclose(result, expected_result)

  # TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
  def _disabledTestScatterOutOfRangeGpu(self):
    if not test.IsBuiltWithCuda():
      return
    # TODO(simister): Re-enable once binary size increase due to
    # scatter_nd ops is under control.
    # tf.scatter_nd_mul, tf.scatter_nd_div,
    for op in (state_ops.scatter_nd_add, state_ops.scatter_nd_sub,
               state_ops.scatter_nd_update):
      params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
      updates = np.array([-3, -4, -5]).astype(np.float32)
      # With GPU, the code ignores indices that are out of range.
      # We don't test the implementation; just test there's no failures.
      with self.cached_session(force_gpu=True):
        ref = variables.Variable(params)
        ref.initializer.run()
        # Indices all in range, no problem.
        indices = np.array([2, 0, 5])
        op(ref, indices, updates).eval()
        # Indices out of range should not fail.
        indices = np.array([-1, 0, 5])
        op(ref, indices, updates).eval()
        indices = np.array([2, 0, 6])
import numpy as np
smooth = 0.01
from medpy.metric import hd95,assd
'''
Binary (0/1) masks can be evaluated with the metrics below.
'''
def calculate_binary_dice(y_true, y_pred, thres=0.5):
    """Dice similarity coefficient between two binarized masks.

    Both inputs are squeezed and thresholded at `thres` before comparison,
    so probability maps as well as hard 0/1 masks are accepted.

    :param y_true: ground-truth mask (any shape; squeezed first).
    :param y_pred: predicted mask (same shape as y_true after squeezing).
    :param thres: binarization threshold; values > thres count as foreground.
    :return: Dice coefficient in [0, 1]; 0.0 when both masks are empty.
    """
    y_true = np.squeeze(y_true)
    y_pred = np.squeeze(y_pred)
    y_true = np.where(y_true > thres, 1, 0)
    y_pred = np.where(y_pred > thres, 1, 0)
    # Computed inline (matching medpy.metric.binary.dc, including the 0.0
    # result for two empty masks) because the original called `dc`, which was
    # never imported and raised a NameError at runtime.
    intersection = np.count_nonzero(y_true & y_pred)
    size_sum = np.count_nonzero(y_true) + np.count_nonzero(y_pred)
    if size_sum == 0:
        return 0.0
    return 2.0 * intersection / size_sum
def calculate_binary_hd(y_true, y_pred,thres=0.5,spacing=[1,1,1]):
y_true=np.squeeze(y_true)
y_pred=np.squeeze(y_pred)
y_true= | np.where(y_true>thres,1,0) | numpy.where |
"""This Module contains basic Contextual Multi-Armed Bandit Algorithms."""
import copy
import math
import random
from abc import ABC, abstractmethod
import numpy as np
from pandas import DataFrame, Series
from scipy.stats import norm
def sigmoid(x):
    """Logistic sigmoid, 1 / (1 + e^-x); maps any real input into (0, 1)."""
    z = np.exp(-x)
    return 1.0 / (1.0 + z)
class MABInterface(ABC):
    """Abstract base class for various Multi-Armed Bandit Algorithms."""

    @abstractmethod
    def select_arm(self) -> None:
        """Decide which arm should be selected."""

    @abstractmethod
    def update(self) -> None:
        """Update the information about the arms after one observation."""

    @abstractmethod
    def batch_update(self) -> None:
        """Update the information about the arms from a batch of observations."""
class LinUCB(MABInterface):
    """Linear Upper Confidence Bound Algorithm for Contextual Multi-Armed Bandit Problem.

    References
    -------
    [1] <NAME>, <NAME>, John, and <NAME>.:
    A contextual-bandit approach to personalized news article recommendation.
    In Proceedings of the 19th International Conference on World Wide Web, pp. 661–670. ACM, 2010.
    """

    def __init__(self, n_arms: int, feature_dim: int, alpha: float =1.0, warmup: int =15, batch_size: int=0) -> None:
        """Initialize class.

        :param n_arms: the number of given arms.
        :param feature_dim: dimensions of the context vector.
        :param alpha: hyper-parameter controlling the width of the confidence
            bound (larger values explore more).
        :param warmup: how many times each arm is explored before UCB-based
            selection starts.
        :param batch_size: the size of information about rewards given in a update.
        """
        self.n_arms = n_arms
        self.feature_dim = feature_dim
        self.warmup = warmup
        self.alpha = alpha
        # Per-arm ridge-regression state; theta_i = A_inv_i . b_i is rebuilt
        # in select_arm.  (Each comprehension iteration already creates fresh
        # objects, so the deepcopy calls of the original were redundant.)
        self.theta = [np.zeros(self.feature_dim) for _ in range(self.n_arms)]  # d * 1
        self.A_inv = [np.matrix(np.identity(self.feature_dim)) for _ in range(self.n_arms)]  # d * d
        self.b = [np.matrix(np.zeros(self.feature_dim)).T for _ in range(self.n_arms)]  # d * 1
        # Shadow statistics used by batch_update; promoted to the live ones
        # every `batch_size` observations.
        self.data_size = 0
        self.batch_size = batch_size
        self._A_inv = [np.matrix(np.identity(self.feature_dim)) for _ in range(self.n_arms)]  # d * d
        self._b = [np.matrix(np.zeros(self.feature_dim)).T for _ in range(self.n_arms)]  # d * 1
        self.counts = np.zeros(self.n_arms, dtype=int)
        self.rewards = 0

    def select_arm(self, x: np.matrix) -> int:
        """Decide which arm should be selected.

        :param x: observed context matrix (feature_dim * 1).
        :return: index of the selected arm.
        """
        if np.any(self.counts < self.warmup):
            # Force exploration of the first arm still inside its warmup period.
            result = np.where(self.counts < self.warmup)[0][0]
        else:
            self.theta = np.concatenate([self.A_inv[i].dot(self.b[i]) for i in np.arange(self.n_arms)], axis=1)  # feature_dim * n_arms
            mu_hat = self.theta.T.dot(x)  # n_arms * 1, estimated mean rewards
            sigma_hat = self.alpha * np.concatenate([np.sqrt(x.T.dot(self.A_inv[i].dot(x))) for i in np.arange(self.n_arms)], axis=0)  # n_arms * 1
            result = np.argmax(mu_hat + sigma_hat)
        return result

    def update(self, x: np.matrix, chosen_arm: int, reward: float) -> None:
        """Update the information about the arms.

        :param x: observed context matrix.
        :param chosen_arm: index of the chosen arm.
        :param reward: reward from the chosen arm.
        """
        self.counts[chosen_arm] += 1
        self.rewards += reward
        # Sherman-Morrison rank-1 update of A^{-1}; avoids explicit inversion.
        self.A_inv[chosen_arm] -= self.A_inv[chosen_arm].dot(x.dot(x.T.dot(self.A_inv[chosen_arm]))) / (1 + x.T.dot(self.A_inv[chosen_arm].dot(x)))
        self.b[chosen_arm] += x * reward  # d * 1

    def batch_update(self, x: np.matrix, chosen_arm: int, reward: float) -> None:
        """Update the information about the arms with a new batch of data.

        :param x: observed context matrix.
        :param chosen_arm: index of the chosen arm.
        :param reward: reward from the chosen arm.
        """
        self.data_size += 1
        self.counts[chosen_arm] += 1
        self.rewards += reward
        self._A_inv[chosen_arm] -= self._A_inv[chosen_arm].dot(x.dot(x.T.dot(self._A_inv[chosen_arm]))) / (1 + x.T.dot(self._A_inv[chosen_arm].dot(x)))  # d * d
        self._b[chosen_arm] += x * reward  # d * 1
        if self.data_size % self.batch_size == 0:
            # Promote the shadow statistics so selection sees the full batch.
            self.A_inv = copy.deepcopy(self._A_inv)  # d * d
            self.b = copy.deepcopy(self._b)  # d * 1
class HybridLinUCB(MABInterface):
"""Hybrid Linear Upper Confidence Bound Algorithm for Contextual Multi-Armed Bandit Problem.
References
-------
[1] <NAME>, <NAME>, Langford, John, and Schapire, <NAME>.:
A contextual-bandit approach to personalized news article recommendation.
In Proceedings of the 19th International Conference on World Wide Web, pp. 661–670. ACM, 2010.
"""
    def __init__(self, n_arms: int, z_dim: int, x_dim: int, alpha: float =1.0, warmup: int =15, batch_size: int=0) -> None:
        """Initialize class.

        :param n_arms: the number of given arms.
        :param z_dim: dimensions of the context features shared by all arms (k).
        :param x_dim: dimensions of the context features unique to each arm (d).
        :param alpha: the hyper-parameter which represents how often the algorithm explores.
        :param warmup: how many times the algorithm randomly explores arms at first.
        :param batch_size: the size of information about rewards given in a update.
        """
        self.n_arms = n_arms
        self.z_dim = z_dim  # k
        self.x_dim = x_dim  # d
        self.warmup = warmup
        self.alpha = alpha
        self.beta = np.zeros(self.z_dim)
        self.theta = None  # d * 1, per-arm coefficients, rebuilt in select_arm
        # matrices which are common to all context
        self.A_zero = np.matrix(np.identity(self.z_dim))  # k * k
        self.b_zero = np.matrix(np.zeros(self.z_dim)).T  # k * 1
        # matrices which are different for each context
        self.A_inv = [copy.deepcopy(np.matrix(np.identity(self.x_dim))) for i in np.arange(self.n_arms)]
        self.B = [copy.deepcopy(np.matrix(np.zeros((self.x_dim, self.z_dim)))) for i in range(self.n_arms)]  # d * k
        self.b = [copy.deepcopy(np.matrix(np.zeros(self.x_dim)).T) for i in range(self.n_arms)]  # d * 1
        # shadow copies used by batch_update until a full batch is collected
        self.data_size = 0
        self.batch_size = batch_size
        self._A_zero = np.matrix(np.identity(self.z_dim))  # k * k
        self._b_zero = np.matrix(np.zeros(self.z_dim)).T  # k * 1
        self._A_inv = [copy.deepcopy(np.matrix(np.identity(self.x_dim))) for i in range(self.n_arms)]  # d * d
        self._B = [copy.deepcopy(np.matrix(np.zeros((self.x_dim, self.z_dim)))) for i in range(self.n_arms)]  # d * k
        self._b = [copy.deepcopy(np.matrix(np.zeros(self.x_dim)).T) for i in range(self.n_arms)]  # d * 1
        self.counts = np.zeros(self.n_arms, dtype=int)
        self.rewards = 0
def select_arm(self, x: np.matrix) -> int:
"""Decide which arm should be selected.
:param x: observed context matrix.
:return: index of the selected arm.
"""
z = x[:][:self.z_dim]
x = x[:][self.z_dim:]
if True in (self.counts < self.warmup):
result = np.where(self.counts < self.warmup)[0][0]
else:
ucb_values = np.zeros(self.n_arms)
self.beta = np.linalg.inv(self.A_zero).dot(self.b_zero) # k * 1
self.theta = [self.A_inv[i].dot(self.b[i] - self.B[i].dot(self.beta)).A.reshape(self.x_dim) for i in np.arange(self.n_arms)] # d * 1
mu_hat = [z.T.dot(self.beta) + x.T.dot(self.theta[i]) for i in np.arange(self.n_arms)]
s1 = z.T.dot(np.linalg.inv(self.A_zero)).dot(z).A[0]
s2 = - 2 * np.array([z.T.dot(np.linalg.inv(self.A_zero)).dot(self.B[i].T).dot(self.A_inv[i]).dot(x) for i in np.arange(self.n_arms)])
s3 = np.array([x.T.dot(self.A_inv[i]).dot(x) for i in np.arange(self.n_arms)])
s4 = np.array([x.T.dot(self.A_inv[i]).dot(self.B[i]).dot(np.linalg.inv(self.A_zero)).dot(
self.B[i].T).dot(self.A_inv[i]).dot(x) for i in np.arange(self.n_arms)])
sigma_hat = s1 + s2 + s3 + s4
ucb_values = mu_hat + self.alpha * sigma_hat
result = | np.argmax(ucb_values) | numpy.argmax |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.