| prompt (string, lengths 19 to 879k) | completion (string, lengths 3 to 53.8k) | api (string, lengths 8 to 59) |
|---|---|---|
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
from jams.const import eps
def zacharias(h, clay, sand, db, params=None, thetar=False, thetas=False, lnalpha=False, n=False):
"""
Soil water content with the van Genuchten equation and
the pedotransfer functions of Zacharias et al. (2007).
Definition
----------
def zacharias(h, clay, sand, db, params=None, thetar=False, thetas=False, lnalpha=False, n=False):
Input
-----
h pressure head, scalar or array [cm], 0=saturation, 15000=wilting point
clay clay content, scalar or array [%, i.e. 0-100]
sand sand content, scalar or array [%, i.e. 0-100]
db bulk density, scalar or array [g/cm3], quartz=2.65
Optional Input
--------------
params Parameter for Zacharias et al. (2007) pedotransfer functions
If None, values from Zacharias et al. will be taken that are different
between sandy and non-sandy soil (<66.5% sand)
Options
-------
thetar If True, outputs residual water content thetar as well [m3 m-3]
thetas If True, outputs saturation water content thetas as well [m3 m-3]
lnalpha If True, outputs logarithm of shape parameter alpha as well [1/cm]
n If True, output exponent n as well
Output
------
Soil water content theta [m^3 m^-3]
Restrictions
------------
Does not check the validity of the parameter set; for example, negative
soil moistures can occur.
Use zacharias_check to check the parameter set first.
Examples
--------
>>> h = np.array([0.0000000, 0.0000000, 10.000000, 31.622777,
... 100.00000, 199.52623, 199.52623,
... 501.18723, 2511.8864, 15848.932])
>>> sand = np.array([12.800000, 61.600000, 17.200000, 85.800000,
... 16.500000, 12.800000, 61.600000,
... 17.200000, 85.800000, 16.500000])
>>> clay = np.array([30.500000, 17.200000, 25.500000, 8.9000000,
... 28.100000, 30.500000, 17.200000,
... 25.500000, 8.9000000, 28.100000])
>>> rho = np.array([1.2100000, 1.3400000, 1.4600000, 1.6300000,
... 1.3000000, 1.2100000, 1.3400000,
... 1.4600000, 1.6300000, 1.3000000])
>>> from autostring import astr
>>> print(astr(zacharias(h, clay, sand, rho),3,pp=True))
['0.500' '0.453' '0.421' '0.245' '0.393' '0.381' '0.285' '0.313' '0.039' '0.221']
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2016 <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written, MC, Jun 2012
Modified, MC, Feb 2013 - ported to Python 3
MC, Nov 2016 - const.tiny -> const.eps
"""
#
# Check input
ih = np.where(h==0., eps, h)
if np.any(ih < 0.) | np.any(ih > 1e6):
raise ValueError('h must be >=0 and <= 1e6 (=pf6)')
iclay = np.where(clay==0., eps, clay)
if np.any(iclay < 0.) | np.any(iclay > 100.):
raise ValueError('clay must be >=0 and <= 100.')
isand = np.where(sand==0., eps, sand)
if np.any(isand < 0.) | np.any(isand > 100.):
raise ValueError('sand must be >=0 and <= 100.')
idb = np.array(db)
if np.any(idb < 0.) | np.any(idb > 2.65):
raise ValueError('db must be >=0 and <= 2.65.')
nn = np.size(isand)
if (np.size(iclay) != nn) | (np.size(idb) != nn) | (np.size(ih) != nn):
raise ValueError('h, sand, clay, and db must have the same sizes.')
if params is not None:
if np.size(params) != 15:
raise ValueError('size(params) must be 15.')
# save output shape
ns = np.shape(isand)
iclay = np.ravel(iclay)
isand = np.ravel(isand)
idb = np.ravel(idb)
# Take right params
par0 = np.empty(nn)
par1 = np.empty(nn)
par2 = np.empty(nn)
par3 = np.empty(nn)
par4 = np.empty(nn)
par5 = np.empty(nn)
par6 = np.empty(nn)
    par7 = np.empty(nn)
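# NOTE: illustrative sketch, not part of the original sample. The truncated
# function above ultimately evaluates the van Genuchten retention curve
#   theta(h) = thetar + (thetas - thetar) / (1 + (alpha*h)**n)**m,  m = 1 - 1/n,
# once the pedotransfer parameters are assembled. A minimal standalone version,
# assuming that standard parameterization (the example values are made up):
def van_genuchten(h, thetar, thetas, alpha, n):
    """Soil water content [m3 m-3] at pressure head h [cm]."""
    m = 1. - 1. / n
    return thetar + (thetas - thetar) / (1. + (alpha * np.asarray(h))**n)**m
# e.g. van_genuchten(330., thetar=0.05, thetas=0.45, alpha=0.02, n=1.5)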
#!/usr/bin/python3
"""
sys.argv[1] - input database file
sys.argv[2] - output mat file
Composed by <NAME> @THU_IVG
Last revision: <NAME> @THU_IVG @Oct 3rd, 2019 CST
"""
import json
import scipy.io as sio
import numpy as np
import itertools
import sys
db_f = sys.argv[1]
with open(db_f) as f:
database = json.load(f)["database"]
steps = sorted(set(
    int(an["id"]) for an in itertools.chain.from_iterable(
        v["annotation"] for v in database.values())))
min_id = steps[0]
nb_step = len(steps)
init_dist = np.zeros((nb_step,))
frequency_mat = np.zeros((nb_step, nb_step))
for v in database:
if database[v]["subset"]!="training":
continue
for i, an in enumerate(database[v]["annotation"]):
if i==0:
init_dist[int(an["id"])-min_id] += 1
else:
frequency_mat[int(pan["id"])-min_id, int(an["id"])-min_id] += 1
pan = an
normalized_init_dist = init_dist/np.sum(init_dist)
frequency_mat_sum = np.sum(frequency_mat, axis=1)
normalized_frequency_mat = np.copy(frequency_mat)
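# NOTE: plausible continuation (an assumption; the original sample is truncated
# here). The counts form a first-order Markov model of annotation steps: each
# row is normalized into transition probabilities, skipping all-zero rows to
# avoid division by zero.
for i in range(nb_step):
    if frequency_mat_sum[i] > 0:
        normalized_frequency_mat[i] /= frequency_mat_sum[i]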
import sys
import logging
def calc_ticks(image, dz=1, dx=1):
"""
calculate axial and lateral mesh using image dimensions and input values
for axial and lateral sampling intervals
:param image: input image for display
:param dz: axial sampling interval
:param dx: lateral sampling interval
:return: axi, lat (np.array)
"""
import numpy as np
image = np.array(image)
xdim, zdim = image.shape
# calculate lateral axis with origin (0,0) in the center
if xdim % 2:
x = np.array([dx*ii
for ii in range(int(-(xdim-1)/2), int((xdim-1)/2+1))])
else:
x = np.array([dx*(ii+0.5)
for ii in range(int(-xdim/2), int(xdim/2))])
# calculate axial axis
z = np.array([dz*ii for ii in range(0, zdim)])
# generate mesh using calculated axial and lateral axes
axi, lat = np.meshgrid(z, x, indexing='xy')
msg = '[calc_ticks] Image mesh generated.'
print(msg)
logging.debug(msg)
return axi, lat
def calc_b_geometry(fs, beam_spacing, c=1540., units='cm'):
"""
calculate b-mode sample spacing with user specified units
:param fs: rf sampling frequency (Hz)
:param c: speed of sound (m/s)
:param beam_spacing: spacing between lateral beams (m)
:param units: units of output values
:return: dz, dx (float)
"""
if units == 'cm':
scale = 100.
elif units == 'mm':
scale = 1000.
elif units == 'm':
scale = 1.
else:
msg = 'ERROR [calc_b_geometry] Invalid unit type specified. Exiting ' \
'script...'
logging.error(msg)
print(msg)
sys.exit()
c = float(c)
beam_spacing = float(beam_spacing)
fs = float(fs)
# calculate axial and lateral pixel spacings
dz = scale*c*1/fs/2
dx = scale*beam_spacing
logging.info('Axial tick size: ' + str(dz) + ' ' + units)
logging.info('Lateral tick size: ' + str(dx) + ' ' + units)
return dz, dx
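# NOTE: illustrative usage, not part of the original module. Axial spacing is
# c/(2*fs) because each RF sample covers a round trip; with fs = 40 MHz,
# c = 1540 m/s and 0.2 mm beam spacing (all assumed values):
#   dz, dx = calc_b_geometry(fs=40e6, beam_spacing=2e-4, units='mm')
#   # dz = 1000*1540/40e6/2 ~= 0.01925 mm, dx = 0.2 mm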
def create_dir(filepath):
"""
create new folder if directory in file path does not exist
:param filepath: file path and name
"""
import os
out_dir = os.path.dirname(filepath)
if not os.path.exists(out_dir):
try:
os.makedirs(out_dir)
except:
msg = 'ERROR [create_dir] Invalid output path ' + out_dir + \
'. Exiting script...'
print(msg)
logging.error(msg)
sys.exit()
def generate_image(image, dz=1, dx=1, dynamic_range=[0, 1], hist_eq=False,
post_proc=False, z_label='z', x_label='x',
filename='./bmode.png', save_flag=True,
display_flag=False):
"""
display/save output image with user-specified dynamic range
:param image: input image for display
:param dz: axial sampling interval
:param dx: lateral sampling interval
:param dynamic_range: displayed dynamic range
:param hist_eq: enable to perform histogram equalization
:param post_proc: apply image post-processing
:param z_label: label for z (axial) axis
:param x_label: label for x (lateral) axis
:param filename: file path and name of saved .png
:param save_flag: enable to save .png
:param display_flag: enable to display image
"""
if not display_flag:
import matplotlib
matplotlib.use('Agg')
msg = 'WARNING [generate_image] Using Agg matplotlib backend.'
print(msg)
logging.warning(msg)
import matplotlib.pyplot as plt
import numpy as np
uint16_scale = 65535
# generate lat and axi meshes based on pixel spacing (dz and dx)
axi, lat = calc_ticks(image, dz, dx)
    if dynamic_range[0] > dynamic_range[1]:
        # swap the bounds in place
        dynamic_range[0], dynamic_range[1] = dynamic_range[1], dynamic_range[0]
msg = 'WARNING [generate_image] Dynamic range bounds out of order. ' \
'Reversing bounds for display...'
print(msg)
logging.warning(msg)
# perform post-processing on full image
if post_proc:
from skimage import filters
msg = '[generate_image] Performing image post-processing...'
logging.debug(msg)
print(msg)
raw = image
image = filters.gaussian(raw, sigma=0.75)
# clip image bounds based on specified dynamic range
if dynamic_range[0] < np.min(image):
dynamic_range[0] = np.min(image)
image = np.clip(image, dynamic_range[0], dynamic_range[1])
# perform histogram equalization on clipped and normalized image
if hist_eq:
from skimage import exposure
msg = '[generate_image] Performing histogram equalization...'
logging.debug(msg)
print(msg)
image += np.abs(dynamic_range[0])
        image /= np.abs(dynamic_range[0])
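# NOTE: the original sample is truncated here. A typical continuation
# (an assumption, not the original code) rescales the clipped image to [0, 1]
# and applies skimage's histogram equalization before display:
#   image /= np.max(image)
#   image = exposure.equalize_hist(image)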
import cv2
import base as bs
import numpy as np
def binarize(img, thr):
ret = np.zeros(img.shape, dtype='uint8')
ret[img >= thr] = 255
return ret
def conv_to_gray(img):
return (0.0722 * img[:, :, 0] + 0.7152 * img[:, :, 1] + 0.2126 * img[:, :, 2]).astype(np.uint8)
def Otsu_binarize(img):
    img = img.astype(float)
thres_set = range(255)
argmin_thr = 0
max_sigma_b = None
for thr in thres_set:
c_1 = np.array(img[img <= thr])
c_2 = np.array(img[img > thr])
w_1 = len(c_1)
w_2 = len(c_2)
if(len(c_1) == 0 or len(c_2) == 0):
continue
else:
                tmp_sigma_b = w_1 * w_2 * np.power(np.mean(c_1) - np.mean(c_2), 2)
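# NOTE: the original loop is truncated above. A self-contained sketch of the
# same Otsu search for reference (an assumption, not the original code): keep
# the threshold maximizing the between-class variance w1*w2*(mu1 - mu2)**2.
def otsu_threshold(img):
    img = img.astype(float)
    best_thr, best_sigma_b = 0, -1.0
    for thr in range(255):
        c_1, c_2 = img[img <= thr], img[img > thr]
        if c_1.size == 0 or c_2.size == 0:
            continue
        sigma_b = c_1.size * c_2.size * (np.mean(c_1) - np.mean(c_2))**2
        if sigma_b > best_sigma_b:
            best_sigma_b, best_thr = sigma_b, thr
    return best_thr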
"""
The ``surface`` and ``curve`` classes are used to define non-rectangular geometries for simulation
blocks. Every block in a simulation can accept instances of the ``surface`` or ``curve`` class to
override the rectangular geometry defined by the lower left coordinate and block length. Any
block edge that is not defined by a surface or curve will automatically take on the rectangular
geometry, so you only need to define surfaces or curves for edges that do not match the
rectangular geometry defined in this way.
The ``surface`` and ``curve`` classes rely on Numpy arrays to hold the coordinate values of
the block edges. In particular, each surface holds three (n1,n2)-shaped arrays ``x``, ``y``, and ``z``
that define the coordinates of the surface. Curves similarly hold two (n1,1) arrays ``x`` and ``y``
(``z`` is defined but holds a single point and is not used).
Surfaces are defined by providing the number of grid points, normal direction in the computational
space (a string ``'x'``, ``'y'``, or ``'z'``), and the arrays ``x``, ``y``, and ``z`` holding the coordinates.
An example of how to define a surface in the ``'z'`` direction is as follows:
>>> import fdfault
>>> import numpy as np
>>> nx = 401
>>> ny = 201
>>> x = np.linspace(0., 40., nx)
>>> y = np.linspace(0., 20., ny)
>>> xm, ym = np.meshgrid(x, y, indexing='ij')
>>> z = np.exp(-(xm-20.)**2/5.-(ym-10.)**2/5.)
>>> surf = fdfault.surface(nx, ny, 'z', xm, ym, z)
Note that this example uses a uniform grid spacing on the x and y coordinates. This is not required,
and you are free to use irregular grid spacing as long as the resulting 3D grid meets the smoothness
requirements imposed by the numerical method (precisely, the metric tensor for the grid must have
a positive Jacobian everywhere).
Similarly, you can define a curve, though fewer arguments are needed:
>>> import fdfault
>>> import numpy as np
>>> nx = 401
>>> x = np.linspace(0., 40., nx)
>>> y = np.sin(np.pi*x/40.)
>>> curv = fdfault.curve(nx, 'y', x, y)
Once the surfaces or curves are created, you can use the ``set_block_surf`` method of a problem
to set the bounding surfaces or curves of a given block. The necessary binary files holding
the coordinates will be automatically written to file when the ``write_input`` method of the problem
is called.
In addition to these basic classes, the code also has several functions designed to generate
surfaces and curves from more basic information. These include ``points_to_curve``, which
generates a curve connecting two points with a uniform grid spacing, and ``curves_to_surf``,
which generates a surface bounded by four 3D curves using transfinite interpolation.
"""
from __future__ import division, print_function
import numpy as np
class surface(object):
'''
The surface class represents a surface for defining interfaces and block boundaries
Each surface contains the following attributes:
:ivar n1: Number of grid points in the first spatial direction (x unless the interface is an ``'x'`` interface)
:type n1: int
:ivar n2: Number of grid points in the second spatial direction (z unless the interface is a ``'z'`` interface)
:type n2: int
:ivar direction: Normal direction in computational space
:type direction: str
:ivar x: Numpy array holding x coordinates, must have shape (n1,n2)
:type x: ndarray
:ivar y: Numpy array holding y coordinates, must have shape (n1,n2)
:type y: ndarray
:ivar z: Numpy array holding z coordinates, must have shape (n1,n2)
:type z: ndarray
'''
def __init__(self, n1, n2, direction, x, y, z):
'''
Initialize a ``surface`` instance
Required arguments are n1 and n2, which are number of grid points in each direction,
a direction which indicates the surface orientation in computational space (``'x'``, ``'y'``,
or ``'z'``), plus three arrays x, y, and z (must have shape ``(n1, n2)``) that hold the
coordinates for the new surface. Initializing with a negative number of grid points, with
arrays that do not have the correct shape, or with a bad string for the surface orientation
will result in an error.
:param n1: Number of grid points along first dimension
:type n1: int
:param n2: Number of grid points along second dimension
:type n2: int
:param direction: String indicating surface normal direction in computational space
(must be ``'x'``, ``'y'``, or ``'z'``)
:type direction: str
:param x: Array holding surface x coordinates (must have shape ``(n1, n2)``)
:type x: ndarray
:param y: Array holding surface y coordinates (must have shape ``(n1, n2)``)
:type y: ndarray
:param z: Array holding surface z coordinates (must have shape ``(n1, n2)``)
:type z: ndarray
:returns: New surface with specified properties
:rtype: surface
'''
assert(direction == 'x' or direction == 'y' or direction == 'z')
assert(n1 > 0)
assert(n2 > 0)
n1 = int(n1)
n2 = int(n2)
self.n1 = n1
self.n2 = n2
self.direction = direction
self.x = np.array(x)
self.y = np.array(y)
self.z = np.array(z)
assert (n1, n2) == self.x.shape, "x must have shape (n1, n2)"
assert (n1, n2) == self.y.shape, "y must have shape (n1, n2)"
assert (n1, n2) == self.z.shape, "z must have shape (n1, n2)"
def get_direction(self):
"""
Returns approximate normal direction
:returns: Normal direction in computational space
:rtype: str
"""
return self.direction
def get_n1(self):
'''
Returns number of grid points along first dimension
:returns: Number of grid points along first dimension (x unless interface direction is ``'x'``)
:rtype: int
'''
return self.n1
def get_n2(self):
'''
Returns number of grid points along second dimension
:returns: Number of grid points along second dimension (z unless interface direction is ``'z'``)
:rtype: int
'''
return self.n2
def get_x(self, i=None):
'''
Returns x coordinate array
if no argument is provided, the method returns the entire array. Otherwise, ``i`` must
be a valid index tuple for the array.
:param i: Index tuple (must be a valid index into the array). Optional, if not provided or
if ``None`` is given, this returns the entire array.
:type i: tuple or None
:returns: Value of x coordinate
:rtype: ndarray or float
'''
if i is None:
return self.x
else:
return self.x[i]
def get_y(self, i=None):
'''
Returns y coordinate array
if no argument is provided, the method returns the entire array. Otherwise, ``i`` must
be a valid index tuple for the array.
:param i: Index tuple (must be a valid index into the array). Optional, if not provided or
if ``None`` is given, this returns the entire array.
:type i: tuple or None
:returns: Value of y coordinate
:rtype: ndarray or float
'''
if i is None:
return self.y
else:
return self.y[i]
def get_z(self, i=None):
'''
Returns z coordinate array
if no argument is provided, the method returns the entire array. Otherwise, ``i`` must
be a valid index tuple for the array.
:param i: Index tuple (must be a valid index into the array). Optional, if not provided or
if ``None`` is given, this returns the entire array.
:type i: tuple or None
:returns: Value of z coordinate
:rtype: ndarray or float
'''
if i is None:
return self.z
else:
return self.z[i]
def __eq__(self, othersurf):
'''
compares two surfaces, returns boolean indicating if all coordinates are identical
'''
return (np.allclose(self.get_x(), othersurf.get_x()) and np.allclose(self.get_y(), othersurf.get_y())
and np.allclose(self.get_z(), othersurf.get_z()))
def has_same_edge(self, edge1, edge2, othersurf):
'''
Compares the edges of two surfaces
The method compares the edges of two surfaces, using the indices 0-3 to indicate the
edges (one argument must be provided for each surface)
* 0 means edge where second index is zero
* 1 means edge where first index is zero
* 2 means edge where second index is n2-1
* 3 means edge where first index is n1-1
Returns a boolean.
:param edge1: Edge of first surface to be used. Must be integer 0-3
:type edge1: int
:param edge2: Edge of second surface to be used. Must be integer 0-3
:type edge2: int
:param othersurf: The second surface, must be a surface
:type othersurf: surface
:returns: Whether or not the selected edges match
:rtype: bool
'''
assert type(edge1) is int and edge1 >= 0 and edge1 < 4, "edge1 out of range"
assert type(edge2) is int and edge2 >= 0 and edge2 < 4, "edge2 out of range"
assert type(othersurf) is surface
if (edge1%2 == 0):
if (edge1 == 0):
edge1index = 0
else:
edge1index = self.get_n2()-1
if (edge2%2 == 0):
if (edge2 == 0):
edge2index = 0
else:
edge2index = othersurf.get_n2()-1
return (np.allclose(self.get_x()[:,edge1index], othersurf.get_x()[:,edge2index])
and np.allclose(self.get_y()[:,edge1index], othersurf.get_y()[:,edge2index])
and np.allclose(self.get_z()[:,edge1index], othersurf.get_z()[:,edge2index]))
else:
if (edge2 == 1):
edge2index = 0
else:
edge2index = othersurf.get_n1()-1
return (np.allclose(self.get_x()[:,edge1index], othersurf.get_x()[edge2index,:])
and np.allclose(self.get_y()[:,edge1index], othersurf.get_y()[edge2index,:])
and np.allclose(self.get_z()[:,edge1index], othersurf.get_z()[edge2index,:]))
else:
if (edge1 == 1):
edge1index = 0
else:
edge1index = self.get_n1()-1
if (edge2%2 == 0):
if (edge2 == 0):
edge2index = 0
else:
edge2index = othersurf.get_n2()-1
return (np.allclose(self.get_x()[edge1index,:], othersurf.get_x()[:,edge2index])
and np.allclose(self.get_y()[edge1index,:], othersurf.get_y()[:,edge2index])
and np.allclose(self.get_z()[edge1index,:], othersurf.get_z()[:,edge2index]))
else:
if (edge2 == 1):
edge2index = 0
else:
edge2index = othersurf.get_n1()-1
return (np.allclose(self.get_x()[edge1index,:], othersurf.get_x()[edge2index,:])
and np.allclose(self.get_y()[edge1index,:], othersurf.get_y()[edge2index,:])
and np.allclose(self.get_z()[edge1index,:], othersurf.get_z()[edge2index,:]))
def write(self, filename, endian = '='):
'''
Write surface to binary file
Method writes the surface to a binary file. Input arguments include the desired filename
(required) and the byte ordering of the file (``'='`` native, ``'>'`` big endian, ``'<'`` little endian;
default is native)
:param filename: Filename for output
:type filename: str
:param endian: Byte ordering of output (optional, default is native)
:type endian: str
:returns: None
'''
assert(endian == '=' or endian == '>' or endian == '<'), "bad value for endianness"
f = open(filename, 'wb')
f.write(self.get_x().astype(endian+'f8').tobytes())
f.write(self.get_y().astype(endian+'f8').tobytes())
f.write(self.get_z().astype(endian+'f8').tobytes())
f.close()
def __str__(self):
'''
returns string representation of surface for printing
'''
return 'Surface with normal direction ' + self.direction +', n1 = ' + str(self.n1) + ', n2 = ' + str(self.n2)
class curve3d(surface):
"""
The curve3d class represents a curve in 3 dimensions, used only to define flat surfaces more easily.
curve3d is simply a surface with n2 = 1.
"""
def __init__(self, n1, direction, x, y, z):
'''
        Initialize a ``curve3d`` instance
        Required arguments are n1 (the number of grid points), a direction which indicates
        the orientation in computational space (``'x'``, ``'y'``, or ``'z'``), plus three
        arrays x, y, and z holding the coordinates, each of which is reshaped to ``(n1, 1)``.
        Initializing with a negative number of grid points, with arrays that do not have
        the correct length, or with a bad string for the orientation will result in an error.
        :param n1: Number of grid points
        :type n1: int
        :param direction: String indicating normal direction in computational space
            (must be ``'x'``, ``'y'``, or ``'z'``)
        :type direction: str
        :param x: Array holding x coordinates (must have length ``n1``)
        :type x: ndarray
        :param y: Array holding y coordinates (must have length ``n1``)
        :type y: ndarray
        :param z: Array holding z coordinates (must have length ``n1``)
        :type z: ndarray
        :returns: New curve3d with specified properties
        :rtype: curve3d
'''
n1 = int(n1)
x = np.reshape(np.array(x), (n1,1))
        y = np.reshape(np.array(y), (n1,1))
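# NOTE: illustrative sketch, not part of the original module. The module
# docstring mentions points_to_curve, which connects two points with a uniform
# grid spacing; a minimal version of that idea (fdfault's actual function may
# differ) returns the interpolated coordinate arrays:
def points_to_curve_sketch(p1, p2, npoints):
    """Return x and y arrays linearly interpolated between 2D points p1 and p2."""
    x = np.linspace(p1[0], p2[0], npoints)
    y = np.linspace(p1[1], p2[1], npoints)
    return x, y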
import math
import numpy as np
import scipy.ndimage as nd
from pyCudaImageWarp import cudaImageWarp
"""
Pad the image to have a singleton channel dimension.
"""
def __pad_channel__(im):
ndim = 3
return np.expand_dims(im, ndim) if len(im.shape) < ndim + 1 else im
"""
As __pad_channel__, but for shapes rather than arrays.
"""
def __shape_pad_channel__(shape):
ndim = 3
if len(shape) < ndim + 1:
shape = shape + (1,) * (ndim + 1 - len(shape))
return shape
"""
Adjust the translation component of an affine transform so that it maps
'point' to 'target'. Does not change the linear component.
"""
def set_point_target_affine(mat, point, target):
mat = mat.astype(float)
mat[0:3, 3] = target - mat[0:3, 0:3].dot(point[np.newaxis].T).T
return mat
def jitter_mask(labels, pQuit=0.5, maxIter=1, pKeep=0.5, pJagged=0.5):
"""
Slightly modify a set of labels, with randomness. Only modifies the
image mask, that is, the labels less than zero. Jitters the perimeter
"""
# With probability pQuit, do nothing at all
if np.random.uniform() <= pQuit:
return labels
# Do nothing if all the labels are valid
invalid = labels == -1
if not np.any(invalid):
return labels
# Randomly draw the number of iterations
iters = int(round(np.random.uniform(low=1, high=maxIter)))
# Erode or dilate smoothly
if np.random.uniform() > pJagged:
if np.random.uniform() > 0.5:
invalid = nd.morphology.binary_erosion(invalid, iterations=iters)
else:
invalid = nd.morphology.binary_dilation(invalid, iterations=iters)
else:
# Jitter the boundary in each iteration
for i in range(iters):
# Chose whether to erode or dilate
if np.random.uniform() > 0.5:
new = nd.morphology.binary_erosion(invalid)
else:
new = nd.morphology.binary_dilation(invalid)
# Get the difference and randomly choose whether to keep them
diff = new ^ invalid
invalid[diff] = np.random.uniform(size=(np.sum(diff),)) <= pKeep
# Return the result
result = np.zeros_like(labels)
result[invalid] = -1
result[~invalid] = labels[~invalid]
return result
def get_translation_affine(offset):
"""
Returns a 4x4 affine matrix (homogeneous coordinates) shifting by the
given offset.
"""
mat = np.eye(4)
mat[0:3, 3] = offset
return mat
"""
Check that the image shape is compatible with the xform shape, up to ndim.
Ignores channels unless they're >1.
"""
def __check_shapes__(imShape, xformShape, ndim=3):
hasChannels = len(imShape) > ndim and imShape[ndim] > 1
if hasChannels and xformShape[ndim] != imShape[ndim]:
raise ValueError("Output shape has %d channels, while input has %d" % \
(xformShape[3], imShape[3]))
if len(xformShape[:ndim]) != len(imShape[:ndim]):
raise ValueError("""
Input and output shapes have mismatched number of dimensions.
Input: %s, Output: %s"
""" % (xformShape, imShape))
def __shape_center__(shape):
return (np.array(shape[:3]) - 1.0) / 2.0
def __crop_uniform__(im_center, crop_half_range):
"""
Choose uniformly between valid crops. For compatibility with more
complicated methods, returns the displacement and object center, which
is just the center of the image in this case.
"""
crop_offset = np.random.uniform(low=-crop_half_range, high=crop_half_range)
crop_center = im_center + crop_offset
return crop_center, crop_center
def __crop_in_mask__(crop_half_range, mask, printFun=None):
"""
Crop only in this mask, if at all possible. Returns the displacement
the center of the object around which we're trying to crop.
"""
return __sparse_crop_in_mask__(crop_half_range, mask.shape,
np.flatnonzero(mask), printFun)
def __sparse_crop_in_mask__(crop_half_range, in_shape, inds, printFun=None):
# Compute shape parameters
im_center = __shape_center__(in_shape)
# Check if the mask is empty
if len(inds) == 0:
if printFun is not None:
printFun("Defaulting to uniform crop...")
return __crop_uniform__(im_center, crop_half_range)
# Pick a random center in the range
center_idx = np.random.choice(inds)
object_center = np.array(np.unravel_index(center_idx, in_shape))
# Pick the valid crop with a center closest to this one (clamps coordinates)
object_disp = object_center - im_center
crop_disp = np.minimum(
crop_half_range,
np.maximum(object_disp, -crop_half_range)
)
crop_center = im_center + crop_disp
return crop_center, object_center
"""
Randomly generates a 3D affine map based on the given parameters. Then
applies the map to warp the input image and, optionally, the segmentation.
Warping is done on the GPU using pyCudaImageWarp. By default, the output
shape is the same as that of the input image.
By default, the function only generates the identity map. The affine
transform distribution is controlled by the following parameters:
inShape - The shape of the input image.
seg - The input segmentation, same shape as im (optional).
outShape - The output shape (optional).
init - The initial linear transform. Defaults to identity.
rotMax - Uniform rotation about (x,y,z) axes. For example, (10,10,10)
means +-10 degrees in about each axis.
pReflect - Chance of reflecting about (x,y,z) axis. For example,
(.5, 0, 0) means there is a 50% chance of reflecting about the
x-axis.
shearMax - Uniform shearing about each axis. For example, (1.1, 1.1,
1.1) shears in each axis in the range (1.1, 1 / 1.1)
transMax - Uniform translation in each coordinate. For example, (10, 10,
10) translates by at most +-10 voxels in each coordinate.
otherScale - Gaussian-distributed affine transform. This controls the
variance of each parameter.
randomCrop - Choose whether to randomly crop the image. Possible modes:
'none' - Do no cropping (default).
'uniform' - All crops are equally likely.
'valid' - Like uniform, but only for crops with non-negative label.
'nonzero' - Choose only from crops whose centers have a positive
label. Cannot be used if seg is None.
noiseLevel - An array of C elements. Decide the amount of noise for each channel
using this standard deviation.
windowMin - A 2xC matrix, where C is the number of channels in the image,
from which the lower window threshold is sampled uniformly. By
default, this does nothing. The cth row defines the limits for the
cth channel.
windowMax - A matrix from which the upper window threshold is
sampled uniformly. Same format as winMin. By default, this does
nothing.
occludeProb - Probability that we randomly take out a chunk of out of
the image.
oob_label - The label assigned to out-of-bounds pixels (default: 0)
printFun - If provided, use this function to print the parameters.
oob_image_val - If provided, set out-of-bounds voxels to this value.
api - The underlying computation platform. Either 'cuda' or 'scipy'.
device - The index of the CUDA device, if provided.
All transforms fix the center of the image, except for translation.
"""
def get_xform(inShape, seg=None, outShape=None, randSeed=None,
rotMax=(0, 0, 0), pReflect=(0, 0, 0), init=np.eye(3),
shearMax=(1,1,1), transMax=(0,0,0), otherScale=0, randomCrop='none',
noiseLevel=None, windowMin=None, windowMax=None,
occludeProb=0.0, printFun=None):
# Default to the same output as input shape
if outShape is None:
outShape = inShape
# Pad the shapes with missing dimensions
inShape = __shape_pad_channel__(inShape)
outShape = __shape_pad_channel__(outShape)
numChannels = outShape[-1]
# Check that the input and output shapes are compatible
__check_shapes__(inShape, outShape)
# Set the random seed, if specified
if randSeed is not None:
np.random.seed(randSeed)
# ---Randomly generate the desired transforms, in homogeneous coordinates---
# Draw the noise level
if noiseLevel is not None:
        noiseScale = [np.abs(np.random.normal(scale=n)) for n in noiseLevel]  # one scale per channel
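# NOTE: illustrative sketch, not part of the original module; it only shows
# how the rotMax parameter documented above might be realized. Each angle is
# plausibly drawn uniformly in [-rotMax[i], rotMax[i]] degrees, e.g. about z:
def random_rotation_z(rot_max_deg):
    """Draw a rotation about the z-axis, uniform in [-rot_max_deg, rot_max_deg]."""
    theta = np.radians(np.random.uniform(-rot_max_deg, rot_max_deg))
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])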
import time
import os
import datetime
import pathlib
import json
import cv2
import carla
from leaderboard.autoagents import autonomous_agent
from team_code.planner import RoutePlanner
import numpy as np
from PIL import Image, ImageDraw
SAVE_PATH = os.environ.get('SAVE_PATH', None)
class BaseAgent(autonomous_agent.AutonomousAgent):
def setup(self, path_to_conf_file):
self.track = autonomous_agent.Track.SENSORS
self.config_path = path_to_conf_file
self.step = -1
self.wall_start = time.time()
self.initialized = False
self._sensor_data = {
'width': 400,
'height': 300,
'fov': 100
}
self._3d_bb_distance = 50
self.weather_id = None
self.save_path = None
if SAVE_PATH is not None:
now = datetime.datetime.now()
string = pathlib.Path(os.environ['ROUTES']).stem + '_'
string += '_'.join(map(lambda x: '%02d' % x, (now.month, now.day, now.hour, now.minute, now.second)))
print (string)
self.save_path = pathlib.Path(os.environ['SAVE_PATH']) / string
self.save_path.mkdir(parents=True, exist_ok=False)
for sensor in self.sensors():
            if 'save' in sensor and sensor['save']:
(self.save_path / sensor['id']).mkdir()
(self.save_path / '3d_bbs').mkdir(parents=True, exist_ok=True)
(self.save_path / 'affordances').mkdir(parents=True, exist_ok=True)
(self.save_path / 'measurements').mkdir(parents=True, exist_ok=True)
(self.save_path / 'lidar').mkdir(parents=True, exist_ok=True)
(self.save_path / 'semantic_lidar').mkdir(parents=True, exist_ok=True)
(self.save_path / 'topdown').mkdir(parents=True, exist_ok=True)
for pos in ['front', 'left', 'right', 'rear']:
for sensor_type in ['rgb', 'seg', 'depth', '2d_bbs']:
name = sensor_type + '_' + pos
(self.save_path / name).mkdir()
def _init(self):
self._command_planner = RoutePlanner(7.5, 25.0, 257)
self._command_planner.set_route(self._global_plan, True)
self.initialized = True
self._sensor_data['calibration'] = self._get_camera_to_car_calibration(self._sensor_data)
self._sensors = self.sensor_interface._sensors_objects
def _get_position(self, tick_data):
gps = tick_data['gps']
gps = (gps - self._command_planner.mean) * self._command_planner.scale
return gps
def sensors(self):
return [
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'rgb_front'
},
{
'type': 'sensor.camera.semantic_segmentation',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'seg_front'
},
{
'type': 'sensor.camera.depth',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'depth_front'
},
{
'type': 'sensor.camera.rgb',
'x': -1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'rgb_rear'
},
{
'type': 'sensor.camera.semantic_segmentation',
'x': -1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'seg_rear'
},
{
'type': 'sensor.camera.depth',
'x': -1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 180.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'depth_rear'
},
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'rgb_left'
},
{
'type': 'sensor.camera.semantic_segmentation',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'seg_left'
},
{
'type': 'sensor.camera.depth',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': -60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'depth_left'
},
{
'type': 'sensor.camera.rgb',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'rgb_right'
},
{
'type': 'sensor.camera.semantic_segmentation',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'seg_right'
},
{
'type': 'sensor.camera.depth',
'x': 1.3, 'y': 0.0, 'z': 2.3,
'roll': 0.0, 'pitch': 0.0, 'yaw': 60.0,
'width': self._sensor_data['width'], 'height': self._sensor_data['height'], 'fov': self._sensor_data['fov'],
'id': 'depth_right'
},
{
'type': 'sensor.lidar.ray_cast',
'x': 1.3, 'y': 0.0, 'z': 2.5,
'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0,
'id': 'lidar'
},
{
'type': 'sensor.lidar.ray_cast_semantic',
'x': 1.3, 'y': 0.0, 'z': 2.5,
'roll': 0.0, 'pitch': 0.0, 'yaw': -90.0,
'id': 'semantic_lidar'
},
{
'type': 'sensor.other.imu',
'x': 0.0, 'y': 0.0, 'z': 0.0,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'sensor_tick': 0.05,
'id': 'imu'
},
{
'type': 'sensor.other.gnss',
'x': 0.0, 'y': 0.0, 'z': 0.0,
'roll': 0.0, 'pitch': 0.0, 'yaw': 0.0,
'sensor_tick': 0.01,
'id': 'gps'
},
{
'type': 'sensor.speedometer',
'reading_frequency': 20,
'id': 'speed'
}
]
def tick(self, input_data):
self.step += 1
affordances = self._get_affordances()
traffic_lights = self._find_obstacle('*traffic_light*')
stop_signs = self._find_obstacle('*stop*')
depth = {}
seg = {}
bb_3d = self._get_3d_bbs(max_distance=self._3d_bb_distance)
bb_2d = {}
for pos in ['front', 'left', 'right', 'rear']:
seg_cam = 'seg_' + pos
depth_cam = 'depth_' + pos
_segmentation = np.copy(input_data[seg_cam][1][:, :, 2])
depth[pos] = self._get_depth(input_data[depth_cam][1][:, :, :3])
self._change_seg_tl(_segmentation, depth[pos], traffic_lights)
self._change_seg_stop(_segmentation, depth[pos], stop_signs, seg_cam)
bb_2d[pos] = self._get_2d_bbs(seg_cam, affordances, bb_3d, _segmentation)
#self._draw_2d_bbs(_segmentation, bb_2d[pos])
seg[pos] = _segmentation
rgb_front = cv2.cvtColor(input_data['rgb_front'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_rear = cv2.cvtColor(input_data['rgb_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_left = cv2.cvtColor(input_data['rgb_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)
rgb_right = cv2.cvtColor(input_data['rgb_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)
gps = input_data['gps'][1][:2]
speed = input_data['speed'][1]['speed']
compass = input_data['imu'][1][-1]
depth_front = cv2.cvtColor(input_data['depth_front'][1][:, :, :3], cv2.COLOR_BGR2RGB)
depth_left = cv2.cvtColor(input_data['depth_left'][1][:, :, :3], cv2.COLOR_BGR2RGB)
depth_right = cv2.cvtColor(input_data['depth_right'][1][:, :, :3], cv2.COLOR_BGR2RGB)
depth_rear = cv2.cvtColor(input_data['depth_rear'][1][:, :, :3], cv2.COLOR_BGR2RGB)
weather = self._weather_to_dict(self._world.get_weather())
return {
'rgb_front': rgb_front,
'seg_front': seg['front'],
'depth_front': depth_front,
'2d_bbs_front': bb_2d['front'],
'rgb_rear': rgb_rear,
'seg_rear': seg['rear'],
'depth_rear': depth_rear,
'2d_bbs_rear': bb_2d['rear'],
'rgb_left': rgb_left,
'seg_left': seg['left'],
'depth_left': depth_left,
'2d_bbs_left': bb_2d['left'],
'rgb_right': rgb_right,
'seg_right': seg['right'],
'depth_right': depth_right,
'2d_bbs_right': bb_2d['right'],
'lidar' : input_data['lidar'][1],
'semantic_lidar': input_data['semantic_lidar'][1],
'gps': gps,
'speed': speed,
'compass': compass,
'weather': weather,
'affordances': affordances,
'3d_bbs': bb_3d
}
def save(self, near_node, far_node, near_command, steer, throttle, brake, target_speed, tick_data):
frame = self.step // 10
pos = self._get_position(tick_data)
theta = tick_data['compass']
speed = tick_data['speed']
weather = tick_data['weather']
data = {
'x': pos[0],
'y': pos[1],
'theta': theta,
'speed': speed,
'target_speed': target_speed,
'x_command': far_node[0],
'y_command': far_node[1],
'command': near_command.value,
'steer': steer,
'throttle': throttle,
'brake': brake,
'weather': weather,
'weather_id': self.weather_id,
'near_node_x': near_node[0],
'near_node_y': near_node[1],
'far_node_x': far_node[0],
'far_node_y': far_node[1],
'is_vehicle_present': self.is_vehicle_present,
'is_pedestrian_present': self.is_pedestrian_present,
'is_red_light_present': self.is_red_light_present,
'is_stop_sign_present': self.is_stop_sign_present,
'should_slow': self.should_slow,
'should_brake': self.should_brake,
'angle': self.angle,
'angle_unnorm': self.angle_unnorm,
'angle_far_unnorm': self.angle_far_unnorm,
}
measurements_file = self.save_path / 'measurements' / ('%04d.json' % frame)
f = open(measurements_file, 'w')
json.dump(data, f, indent=4)
f.close()
for pos in ['front', 'left', 'right', 'rear']:
name = 'rgb_' + pos
Image.fromarray(tick_data[name]).save(self.save_path / name / ('%04d.png' % frame))
for sensor_type in ['seg', 'depth']:
name = sensor_type + '_' + pos
Image.fromarray(tick_data[name]).save(self.save_path / name / ('%04d.png' % frame))
for sensor_type in ['2d_bbs']:
name = sensor_type + '_' + pos
np.save(self.save_path / name / ('%04d.npy' % frame), tick_data[name], allow_pickle=True)
Image.fromarray(tick_data['topdown']).save(self.save_path / 'topdown' / ('%04d.png' % frame))
np.save(self.save_path / 'lidar' / ('%04d.npy' % frame), tick_data['lidar'], allow_pickle=True)
np.save(self.save_path / 'semantic_lidar' / ('%04d.npy' % frame), tick_data['semantic_lidar'], allow_pickle=True)
np.save(self.save_path / '3d_bbs' / ('%04d.npy' % frame), tick_data['3d_bbs'], allow_pickle=True)
np.save(self.save_path / 'affordances' / ('%04d.npy' % frame), tick_data['affordances'], allow_pickle=True)
def _weather_to_dict(self, carla_weather):
weather = {
'cloudiness': carla_weather.cloudiness,
'precipitation': carla_weather.precipitation,
'precipitation_deposits': carla_weather.precipitation_deposits,
'wind_intensity': carla_weather.wind_intensity,
'sun_azimuth_angle': carla_weather.sun_azimuth_angle,
'sun_altitude_angle': carla_weather.sun_altitude_angle,
'fog_density': carla_weather.fog_density,
'fog_distance': carla_weather.fog_distance,
'wetness': carla_weather.wetness,
'fog_falloff': carla_weather.fog_falloff,
}
return weather
def _create_bb_points(self, bb):
"""
Returns 3D bounding box world coordinates.
"""
cords = np.zeros((8, 4))
extent = bb[1]
loc = bb[0]
cords[0, :] = np.array([loc[0] + extent[0], loc[1] + extent[1], loc[2] - extent[2], 1])
cords[1, :] = np.array([loc[0] - extent[0], loc[1] + extent[1], loc[2] - extent[2], 1])
cords[2, :] = np.array([loc[0] - extent[0], loc[1] - extent[1], loc[2] - extent[2], 1])
cords[3, :] = np.array([loc[0] + extent[0], loc[1] - extent[1], loc[2] - extent[2], 1])
cords[4, :] = np.array([loc[0] + extent[0], loc[1] + extent[1], loc[2] + extent[2], 1])
cords[5, :] = np.array([loc[0] - extent[0], loc[1] + extent[1], loc[2] + extent[2], 1])
cords[6, :] = np.array([loc[0] - extent[0], loc[1] - extent[1], loc[2] + extent[2], 1])
cords[7, :] = np.array([loc[0] + extent[0], loc[1] - extent[1], loc[2] + extent[2], 1])
return cords
def _translate_tl_state(self, state):
if state == carla.TrafficLightState.Red:
return 0
elif state == carla.TrafficLightState.Yellow:
return 1
elif state == carla.TrafficLightState.Green:
return 2
elif state == carla.TrafficLightState.Off:
return 3
elif state == carla.TrafficLightState.Unknown:
return 4
else:
return None
def _get_affordances(self):
# affordance tl
affordances = {}
affordances["traffic_light"] = None
affecting = self._vehicle.get_traffic_light()
if affecting is not None:
for light in self._traffic_lights:
if light.id == affecting.id:
affordances["traffic_light"] = self._translate_tl_state(self._vehicle.get_traffic_light_state())
affordances["stop_sign"] = self._affected_by_stop
return affordances
def _get_3d_bbs(self, max_distance=50):
bounding_boxes = {
"traffic_lights": [],
"stop_signs": [],
"vehicles": [],
"pedestrians": []
}
bounding_boxes['traffic_lights'] = self._find_obstacle_3dbb('*traffic_light*', max_distance)
bounding_boxes['stop_signs'] = self._find_obstacle_3dbb('*stop*', max_distance)
bounding_boxes['vehicles'] = self._find_obstacle_3dbb('*vehicle*', max_distance)
bounding_boxes['pedestrians'] = self._find_obstacle_3dbb('*walker*', max_distance)
return bounding_boxes
def _get_2d_bbs(self, seg_cam, affordances, bb_3d, seg_img):
"""Returns a dict of all 2d boundingboxes given a camera position, affordances and 3d bbs
Args:
seg_cam ([type]): [description]
affordances ([type]): [description]
bb_3d ([type]): [description]
Returns:
[type]: [description]
"""
bounding_boxes = {
"traffic_light": list(),
"stop_sign": list(),
"vehicles": list(),
"pedestrians": list()
}
if affordances['stop_sign']:
baseline = self._get_2d_bb_baseline(self._target_stop_sign)
bb = self._baseline_to_box(baseline, seg_cam)
if bb is not None:
bounding_boxes["stop_sign"].append(bb)
if affordances['traffic_light'] is not None:
baseline = self._get_2d_bb_baseline(self._vehicle.get_traffic_light(), distance=8)
tl_bb = self._baseline_to_box(baseline, seg_cam, height=.5)
if tl_bb is not None:
bounding_boxes["traffic_light"].append({
"bb": tl_bb,
"state": self._translate_tl_state(self._vehicle.get_traffic_light_state())
})
for vehicle in bb_3d["vehicles"]:
trig_loc_world = self._create_bb_points(vehicle).T
cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(seg_cam), False)
cords_x_y_z = np.array(cords_x_y_z)[:3, :]
veh_bb = self._coords_to_2d_bb(cords_x_y_z)
if veh_bb is not None:
if np.any(seg_img[veh_bb[0][1]:veh_bb[1][1],veh_bb[0][0]:veh_bb[1][0]] == 10):
bounding_boxes["vehicles"].append(veh_bb)
for pedestrian in bb_3d["pedestrians"]:
trig_loc_world = self._create_bb_points(pedestrian).T
cords_x_y_z = self._world_to_sensor(trig_loc_world, self._get_sensor_position(seg_cam), False)
cords_x_y_z = np.array(cords_x_y_z)[:3, :]
ped_bb = self._coords_to_2d_bb(cords_x_y_z)
if ped_bb is not None:
if np.any(seg_img[ped_bb[0][1]:ped_bb[1][1],ped_bb[0][0]:ped_bb[1][0]] == 4):
bounding_boxes["pedestrians"].append(ped_bb)
return bounding_boxes
def _draw_2d_bbs(self, seg_img, bbs):
"""For debugging only
Args:
seg_img ([type]): [description]
bbs ([type]): [description]
"""
for bb_type in bbs:
_region = np.zeros(seg_img.shape)
if bb_type == "traffic_light":
for bb in bbs[bb_type]:
_region = np.zeros(seg_img.shape)
box = bb['bb']
_region[box[0][1]:box[1][1],box[0][0]:box[1][0]] = 1
seg_img[(_region == 1)] = 23
else:
for bb in bbs[bb_type]:
_region[bb[0][1]:bb[1][1],bb[0][0]:bb[1][0]] = 1
if bb_type == "stop_sign":
seg_img[(_region == 1)] = 26
elif bb_type == "vehicles":
seg_img[(_region == 1)] = 10
elif bb_type == "pedestrians":
seg_img[(_region == 1)] = 4
def _find_obstacle_3dbb(self, obstacle_type, max_distance=50):
"""Returns a list of 3d bounding boxes of type obstacle_type.
If the object does have a bounding box, this is returned. Otherwise a bb
of size 0.5,0.5,2 is returned at the origin of the object.
Args:
obstacle_type (String): Regular expression
max_distance (int, optional): max search distance. Returns all bbs in this radius. Defaults to 50.
Returns:
List: List of Boundingboxes
"""
obst = list()
_actors = self._world.get_actors()
_obstacles = _actors.filter(obstacle_type)
for _obstacle in _obstacles:
distance_to_car = _obstacle.get_transform().location.distance(self._vehicle.get_location())
if 0 < distance_to_car <= max_distance:
if hasattr(_obstacle, 'bounding_box'):
loc = _obstacle.bounding_box.location
_obstacle.get_transform().transform(loc)
extent = _obstacle.bounding_box.extent
_rotation_matrix = self.get_matrix(carla.Transform(carla.Location(0,0,0), _obstacle.get_transform().rotation))
rotated_extent = np.squeeze(np.array((np.array([[extent.x, extent.y, extent.z, 1]]) @ _rotation_matrix)[:3]))
bb = np.array([
[loc.x, loc.y, loc.z],
[rotated_extent[0], rotated_extent[1], rotated_extent[2]]
])
else:
loc = _obstacle.get_transform().location
bb = np.array([
[loc.x, loc.y, loc.z],
[0.5, 0.5, 2]
])
obst.append(bb)
return obst
def _get_2d_bb_baseline(self, obstacle, distance=2, cam='seg_front'):
"""Returns 2 coordinates for the baseline for 2d bbs in world coordinates
(distance behind trigger volume, as seen from camera)
Args:
obstacle (Actor): obstacle with a trigger volume
distance (int, optional): Distance behind trigger volume. Defaults to 2.
Returns:
np.ndarray: Baseline
"""
trigger = obstacle.trigger_volume
bb = self._create_2d_bb_points(trigger)
trig_loc_world = self._trig_to_world(bb, obstacle, trigger)
#self._draw_line(trig_loc_world[:,0], trig_loc_world[:,3], 0.7, color=(0, 255, 255))
cords_x_y_z = np.array(self._world_to_sensor(trig_loc_world, self._get_sensor_position(cam)))
indices = (-cords_x_y_z[0]).argsort()
# check crooked up boxes
if self._get_dist(cords_x_y_z[:,indices[0]],cords_x_y_z[:,indices[1]]) < self._get_dist(cords_x_y_z[:,indices[0]],cords_x_y_z[:,indices[2]]):
            cords = cords_x_y_z[:, [indices[0],indices[2]]] + np.array([[distance],[0],[0],[0]])
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import f #fisher
from . import dv, zero_finding
import lmfit
LinAlgError = np.linalg.LinAlgError
from .base_functions import (_fold_exp,
_coh_gaussian,
_fold_exp_and_coh)
import scipy.linalg as linalg
posv = linalg.get_lapack_funcs('posv')
def direct_solve(a, b):
c, x, info = posv(a, b, lower=False,
overwrite_a=True,
overwrite_b=False)
return x
alpha = 0.001
def solve_mat(A, b_mat, method='ridge'):
"""
Returns the solution for the least squares problem |Ax - b_i|^2.
"""
if method == 'fast':
#return linalg.solve(A.T.dot(A), A.T.dot(b_mat), sym_pos=True)
return direct_solve(A.T.dot(A), A.T.dot(b_mat))
elif method == 'ridge':
X = np.dot(A.T, A)
X.flat[::A.shape[1] + 1] += alpha
Xy = np.dot(A.T, b_mat)
#return linalg.solve(X, Xy, sym_pos=True, overwrite_a=True)
return direct_solve(X, Xy)
elif method == 'qr':
cq, r = linalg.qr_multiply(A, b_mat)
return linalg.solve_triangular(r, cq)
elif method == 'cho':
c, l = linalg.cho_factor( A.T.dot(A))
return linalg.cho_solve((c, l), A.T.dot(b_mat))
elif method == 'lstsq':
return np.linalg.lstsq(A, b_mat)[0]
elif method == 'lasso':
import sklearn.linear_model as lm
s = lm.Lasso(fit_intercept=False)
s.alpha = alpha
s.fit(A, b_mat)
return s.coef_.T
elif method == 'enet':
import sklearn.linear_model as lm
s = lm.ElasticNet(fit_intercept=False, l1_ratio=0.2)
s.alpha = alpha
s.fit(A, b_mat)
return s.coef_.T
else:
        raise ValueError('Unknown lsq method, use ridge, qr, fast or lasso')
class Fitter(object):
""" The fit object, takes all the need data and allows to fit it.
There a two different methods to fit the data. The fast one
assumes, that the data has no dispersion, so the base vectors
are the same for each channel. It is recommended to first work
with the fast version. Note that the fast version is able to handle
dispersion by using linear interpolation to transform the data
to dispersion free data.
The slower version calculates the base vector for each channel,
in which the dispersion is integrated.
The slower methods using the prefix full.
Parameters
----------
wl : ndarray(M)
Array containing the wavelength-coordinates.
t : ndarray(N)
Array containing the time-coordinates.
    data : ndarray(N,M)
The 2d-data to fit.
model_coh : bool
If the model contains coherent artifacts at the time zero,
defaults to False.
model_disp : int
Degree of the polynomial which models the dispersion. If 1,
        only an offset is modeled, which is very fast.
"""
def __init__(self, tup, model_coh=False, model_disp=1):
wl, t, data = tup
self.t = t
self.wl = wl
self.data = data
self.verbose = False
self.model_coh = model_coh
self.model_disp = model_disp
self.lsq_method = 'ridge'
self.num_exponentials = -1
self.weights = None
if model_disp > 1:
self.org = data[:]
self.disp_x = (wl - np.min(wl)) / (wl.max() - wl.min())
self.used_disp = np.zeros(model_disp)
def make_model(self, para):
"""
Calculates the model for given parameters. After calling, the
DAS is at self.c, the model at self.model.
If the dispersion is
modeled, it is done via linear interpolation. This way, the base-
vectors and their decomposition are only calculated once.
Parameters
----------
para : ndarray(N)
para has the following form:
[p_0, ..., p_M, w, tau_1, ..., tau_N]
Where p are the coefficients of the dispersion polynomial,
w is the width of the system response and tau are the decay
times. M is equal to self.model_disp.
"""
self.last_para = np.asarray(para)
if self._chk_for_disp_change(para):
# Only calculate interpolated data if necessary:
self.tn = np.poly1d(para[:self.model_disp])(self.disp_x)
tup = dv.tup(self.wl, self.t, self.org)
self.data = zero_finding.interpol(tup, self.tn)[2]
self.used_disp[:] = para[:self.model_disp]
self.num_exponentials = self.last_para.size - self.model_disp - 1
if self.model_disp <= 1:
self._build_xvec(para)
self.x_vec = np.nan_to_num(self.x_vec)
self.c = solve_mat(self.x_vec, self.data, self.lsq_method)
self.model = np.dot(self.x_vec, self.c)
self.c = self.c.T
def _chk_for_disp_change(self, para):
if self.model_disp > 1:
if np.any(para[:self.model_disp] != self.used_disp):
return True
return False
def _build_xvec(self, para):
"""
Build the base (the folded functions) for given parameters.
"""
para = np.array(para)
if self.verbose:
print(para)
try:
idx = (para != self._last)
except AttributeError:
#self._l
idx = [True] * len(para)
if self.model_disp == 1:
x0, w, taus = para[0], para[1], para[2:]
tau_idx = idx[2:]
else:
x0, w, taus = 0., para[0], para[1:]
tau_idx = idx[1:]
if any(idx[:2]) or self.model_disp or True:
if self.model_coh:
x_vec = np.zeros((self.t.size, self.num_exponentials + 3))
#print(taus)
a, b = _fold_exp_and_coh(self.t[:, None], w, x0, taus)
#print(a.shape, b.shape)
x_vec[:, -3:] = b[..., 0, :]
x_vec[:, :-3] = a[..., 0, :]
else:
x_vec = _fold_exp(self.t[:, None], w, x0, taus).squeeze()
        self.x_vec = np.nan_to_num(x_vec)
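# NOTE: illustrative usage, not part of the original module. solve_mat solves
# min_x |Ax - b_i|^2 column-wise; 'ridge' adds alpha to the diagonal of A^T A
# for numerical stability, so for small alpha it should agree with 'lstsq':
#   A = np.random.rand(50, 4); b = np.random.rand(50, 3)
#   x_ridge = solve_mat(A, b, method='ridge')
#   x_lstsq = solve_mat(A, b, method='lstsq')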
import unittest
import numpy as np
import numpy.testing as npt
from scipy.linalg import toeplitz
from doatools.model.array_elements import CustomNonisotropicSensor
from doatools.model.perturbations import LocationErrors, GainErrors, \
PhaseErrors, MutualCoupling
from doatools.model.arrays import GridBasedArrayDesign
from doatools.model.arrays import UniformLinearArray, CoPrimeArray, \
NestedArray, MinimumRedundancyLinearArray, \
UniformCircularArray, UniformRectangularArray
from doatools.model.sources import FarField1DSourcePlacement
class Test1DArrayDesigns(unittest.TestCase):
def setUp(self):
self.wavelength = 1
def test_ula(self):
d0 = 2.
custom_name = 'TestULA'
ula = UniformLinearArray(6, d0, custom_name)
self.assertEqual(ula.size, 6)
self.assertEqual(ula.ndim, 1)
self.assertEqual(ula.name, custom_name)
npt.assert_allclose(ula.d0, np.array([d0]))
npt.assert_allclose(ula.bases, np.array([[d0]]))
npt.assert_array_equal(
ula.element_indices,
np.array([0, 1, 2, 3, 4, 5]).reshape((-1, 1))
)
npt.assert_array_equal(
ula.element_locations,
np.array([0., 2., 4., 6., 8., 10.]).reshape((-1, 1))
)
def test_nested(self):
d0 = 1.
nea = NestedArray(4, 3, d0)
self.assertEqual(nea.n1, 4)
self.assertEqual(nea.n2, 3)
self.assertEqual(nea.size, 7)
self.assertEqual(nea.ndim, 1)
npt.assert_allclose(nea.d0, np.array([d0]))
npt.assert_allclose(nea.bases, np.array([[d0]]))
npt.assert_array_equal(
nea.element_indices,
np.array([0, 1, 2, 3, 4, 9, 14]).reshape((-1, 1))
)
npt.assert_array_equal(
nea.element_locations,
np.array([0., 1., 2., 3., 4., 9., 14.]).reshape((-1, 1))
)
def test_coprime(self):
d0 = self.wavelength / 2
# M
cpa1 = CoPrimeArray(3, 5, d0, 'm')
self.assertEqual(cpa1.coprime_pair, (3, 5))
self.assertEqual(cpa1.mode, 'm')
self.assertEqual(cpa1.size, 7)
self.assertEqual(cpa1.ndim, 1)
npt.assert_array_equal(cpa1.d0, np.array([d0]))
npt.assert_array_equal(cpa1.bases, np.array([[d0]]))
npt.assert_array_equal(
cpa1.element_indices,
np.array([0, 3, 6, 9, 12, 5, 10]).reshape((-1, 1))
)
npt.assert_allclose(
cpa1.element_locations,
np.array([0., 1.5, 3., 4.5, 6., 2.5, 5.]).reshape((-1, 1))
)
# 2M
cpa2 = CoPrimeArray(3, 5, d0, '2m')
self.assertEqual(cpa2.coprime_pair, (3, 5))
self.assertEqual(cpa2.mode, '2m')
self.assertEqual(cpa2.size, 10)
self.assertEqual(cpa2.ndim, 1)
npt.assert_array_equal(cpa2.d0, np.array([d0]))
npt.assert_array_equal(cpa2.bases, np.array([[d0]]))
npt.assert_array_equal(
cpa2.element_indices,
np.array([0, 3, 6, 9, 12, 5, 10, 15, 20, 25]).reshape((-1, 1))
)
npt.assert_allclose(
cpa2.element_locations,
np.array([0., 1.5, 3., 4.5, 6., 2.5, 5., 7.5, 10., 12.5]).reshape((-1, 1))
)
def test_mra(self):
custom_name = 'TestMRA'
d0 = self.wavelength / 2
mra = MinimumRedundancyLinearArray(5, d0, custom_name)
self.assertEqual(mra.size, 5)
self.assertEqual(mra.ndim, 1)
npt.assert_array_equal(mra.d0, np.array([d0]))
        npt.assert_array_equal(mra.bases, np.array([[d0]]))
import numpy as np
def confusion_matrix_binary(y_true,y_pred):
"""
    A confusion matrix is an evaluation metric that counts, for each actual class (rows),
    how often each class was predicted (columns), i.e. true positives and false negatives row-wise.
In this function the actual classes come along the rows.
:param y_true: true class labels
:param y_pred: predicted class labels
:return: Returns the unique classes which points out the row-order of confusion matrix and the confusion matrix
"""
    y_true = np.array(y_true)
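# NOTE: the original function is truncated above. A self-contained sketch of a
# completion matching the docstring (an assumption, not the original code):
# actual classes along rows, predicted classes along columns.
def confusion_matrix_binary_sketch(y_true, y_pred):
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    classes = np.unique(np.concatenate([y_true, y_pred]))
    mat = np.zeros((len(classes), len(classes)), dtype=int)
    for t, p in zip(y_true, y_pred):
        mat[np.searchsorted(classes, t), np.searchsorted(classes, p)] += 1
    return classes, mat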
# You are at the top. If you attempt to go any higher
# you will go beyond the known limits of the code
# universe where there are most certainly monsters
# might be able to get a speedup where I'm appending move and -move
# to do:
# use point raycaster to make a cloth_wrap option
# self collisions
# maybe do dynamic margins for when cloth is moving fast
# object collisions
# collisions need to properly exclude pinned and vertex pinned
# add bending springs
# add curl by shortening bending springs on one axis or diagonal
# independently scale bending springs and structural to create buckling
# option to cache animation?
# Custom Source shape option for animated shapes
# collisions:
# Only need to check one of the edges for groups connected to a vertex
# for edge to face intersections...
# figure out where the edge hit the face
# figure out which end of the edge is inside the face
# move along the face normal to the surface for the point inside.
# if I reflect by flipping the vel around the face normal
# if it collides on the bounce it will get caught on the next iteration
# Sewing
# Could create super sewing that doesn't use edges but uses scalars along the edge to place virtual points
# sort of a barycentric virtual spring. Could even use it to sew to faces if I can think of a ui for where on the face.
# On an all-triangle mesh, where sew edges come together there are long straight lines. This probably causes those edges to fold.
# in other words... creating diagonal springs between these edges will not solve the fold problem. Bend spring could do this.
# Bend springs:
# need to speed things up
# When faces have various sizes, the forces don't add up
# self collision
# where points are pinned, stuff is all jittery
'''??? Would it make sense to do self collisions with virtual edges ???'''
'''??? Could do dynamic collision margins for stuff moving fast ???'''
bl_info = {
"name": "Modeling Cloth",
"author": "<NAME> (<EMAIL>.com), <NAME> (@ucupumar)",
"version": (1, 0),
"blender": (2, 79, 0),
"location": "View3D > Extended Tools > Modeling Cloth",
"description": "Maintains the surface area of an object so it behaves like cloth",
"warning": "There might be an angry rhinoceros behind you",
"wiki_url": "",
"category": '3D View'}
import bpy
import bmesh
import numpy as np
from numpy import newaxis as nax
from bpy_extras import view3d_utils
from bpy.props import *
from bpy.app.handlers import persistent
from mathutils import *
import time, sys
#enable_numexpr = True
enable_numexpr = False
if enable_numexpr:
import numexpr as ne
you_have_a_sense_of_humor = False
#you_have_a_sense_of_humor = True
if you_have_a_sense_of_humor:
import antigravity
def get_co(ob, arr=None, key=None): # key
"""Returns vertex coords as N x 3"""
c = len(ob.data.vertices)
if arr is None:
arr = np.zeros(c * 3, dtype=np.float32)
if key is not None:
ob.data.shape_keys.key_blocks[key].data.foreach_get('co', arr.ravel())
arr.shape = (c, 3)
return arr
ob.data.vertices.foreach_get('co', arr.ravel())
arr.shape = (c, 3)
return arr
def get_proxy_co(ob, arr, me):
"""Returns vertex coords with modifier effects as N x 3"""
if arr is None:
arr = np.zeros(len(me.vertices) * 3, dtype=np.float32)
arr.shape = (arr.shape[0] //3, 3)
c = arr.shape[0]
me.vertices.foreach_get('co', arr.ravel())
arr.shape = (c, 3)
return arr
def triangulate(me, ob=None):
"""Requires a mesh. Returns an index array for viewing co as triangles"""
obm = bmesh.new()
obm.from_mesh(me)
bmesh.ops.triangulate(obm, faces=obm.faces)
#obm.to_mesh(me)
count = len(obm.faces)
#tri_idx = np.zeros(count * 3, dtype=np.int32)
#me.polygons.foreach_get('vertices', tri_idx)
tri_idx = np.array([[v.index for v in f.verts] for f in obm.faces])
# Identify bend spring groups. Each edge gets paired with two points on tips of tris around edge
# Restricted to edges with two linked faces on a triangulated version of the mesh
if ob is not None:
link_ed = [e for e in obm.edges if len(e.link_faces) == 2]
ob.bend_eidx = np.array([[e.verts[0].index, e.verts[1].index] for e in link_ed])
fv = np.array([[[v.index for v in f.verts] for f in e.link_faces] for e in link_ed])
fv.shape = (fv.shape[0],6)
ob.bend_tips = np.array([[idx for idx in fvidx if idx not in e] for e, fvidx in zip(ob.bend_eidx, fv)])
obm.free()
return tri_idx#.reshape(count, 3)
def tri_normals_in_place(col, tri_co):
"""Takes N x 3 x 3 set of 3d triangles and
returns non-unit normals and origins"""
col.origins = tri_co[:,0]
col.cross_vecs = tri_co[:,1:] - col.origins[:, nax]
col.normals = np.cross(col.cross_vecs[:,0], col.cross_vecs[:,1])
col.nor_dots = np.einsum("ij, ij->i", col.normals, col.normals)
col.normals /= np.sqrt(col.nor_dots)[:, nax]
def get_tri_normals(tr_co):
"""Takes N x 3 x 3 set of 3d triangles and
returns non-unit normals and origins"""
origins = tr_co[:,0]
cross_vecs = tr_co[:,1:] - origins[:, nax]
return cross_vecs, np.cross(cross_vecs[:,0], cross_vecs[:,1]), origins
def closest_points_edge(vec, origin, p):
'''Returns the location of the point on the edge'''
vec2 = p - origin
d = (vec2 @ vec) / (vec @ vec)
cp = vec * d[:, nax]
return cp, d
def proxy_in_place(col, me):
"""Overwrite vert coords with modifiers in world space"""
me.vertices.foreach_get('co', col.co.ravel())
col.co = apply_transforms(col.ob, col.co)
def apply_rotation(col):
"""When applying vectors such as normals we only need
to rotate"""
m = np.array(col.ob.matrix_world)
mat = m[:3, :3].T
col.v_normals = col.v_normals @ mat
def proxy_v_normals_in_place(col, world=True, me=None):
"""Overwrite vert coords with modifiers in world space"""
me.vertices.foreach_get('normal', col.v_normals.ravel())
if world:
apply_rotation(col)
def proxy_v_normals(ob, me):
"""Overwrite vert coords with modifiers in world space"""
arr = np.zeros(len(me.vertices) * 3, dtype=np.float32)
me.vertices.foreach_get('normal', arr)
arr.shape = (arr.shape[0] //3, 3)
m = np.array(ob.matrix_world, dtype=np.float32)
mat = m[:3, :3].T # rotates backwards without T
return arr @ mat
def apply_transforms(ob, co):
"""Get vert coords in world space"""
m = np.array(ob.matrix_world, dtype=np.float32)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
return co @ mat + loc
def apply_in_place(ob, arr, cloth):
"""Overwrite vert coords in world space"""
m = np.array(ob.matrix_world, dtype=np.float32)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
arr[:] = arr @ mat + loc
#cloth.co = cloth.co @ mat + loc
def applied_key_co(ob, arr=None, key=None):
"""Get vert coords in world space"""
c = len(ob.data.vertices)
if arr is None:
arr = np.zeros(c * 3, dtype=np.float32)
ob.data.shape_keys.key_blocks[key].data.foreach_get('co', arr)
arr.shape = (c, 3)
m = np.array(ob.matrix_world)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
return arr @ mat + loc
def revert_transforms(ob, co):
"""Set world coords on object.
Run before setting coords to deal with object transforms
if using apply_transforms()"""
m = np.linalg.inv(ob.matrix_world)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
return co @ mat + loc
def revert_in_place(ob, co):
"""Revert world coords to object coords in place."""
m = np.linalg.inv(ob.matrix_world)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
co[:] = co @ mat + loc
def revert_rotation(ob, co):
"""When reverting vectors such as normals we only need
to rotate"""
#m = np.linalg.inv(ob.matrix_world)
m = np.array(ob.matrix_world)
mat = m[:3, :3] # rotates backwards without T
return co @ mat
def get_last_object():
"""Finds cloth objects for keeping settings active
while selecting other objects like pins"""
cloths = [i for i in bpy.data.objects if i.mclo.enable] # so we can select an empty and keep the settings menu up
if bpy.context.object.mclo.enable:
return cloths, bpy.context.object
if len(cloths) > 0:
ob = bpy.context.scene.mclo.last_object
return cloths, ob
return None, None
def get_poly_centers(ob, type=np.float32, mesh=None):
mod = False
m_count = len(ob.modifiers)
if m_count > 0:
show = np.zeros(m_count, dtype=bool)
ren_set = np.copy(show)
ob.modifiers.foreach_get('show_render', show)
ob.modifiers.foreach_set('show_render', ren_set)
mod = True
p_count = len(mesh.polygons)
center = np.zeros(p_count * 3, dtype=type)
mesh.polygons.foreach_get('center', center)
center.shape = (p_count, 3)
if mod:
ob.modifiers.foreach_set('show_render', show)
return center
def simple_poly_centers(ob, key=None):
    verts = ob.data.vertices
    if key is not None:
        verts = ob.data.shape_keys.key_blocks[key].data
    return np.squeeze([[np.mean([verts[i].co for i in p.vertices], axis=0)] for p in ob.data.polygons])
def get_poly_normals(ob, type=np.float32, mesh=None):
mod = False
m_count = len(ob.modifiers)
if m_count > 0:
show = np.zeros(m_count, dtype=bool)
ren_set = np.copy(show)
ob.modifiers.foreach_get('show_render', show)
ob.modifiers.foreach_set('show_render', ren_set)
mod = True
p_count = len(mesh.polygons)
normal = np.zeros(p_count * 3, dtype=type)
mesh.polygons.foreach_get('normal', normal)
normal.shape = (p_count, 3)
if mod:
ob.modifiers.foreach_set('show_render', show)
return normal
def get_v_normals(ob, arr, mesh):
"""Since we're reading from a shape key we have to use
a proxy mesh."""
mod = False
m_count = len(ob.modifiers)
if m_count > 0:
show = np.zeros(m_count, dtype=bool)
ren_set = np.copy(show)
ob.modifiers.foreach_get('show_render', show)
ob.modifiers.foreach_set('show_render', ren_set)
mod = True
#v_count = len(mesh.vertices)
#normal = np.zeros(v_count * 3)#, dtype=type)
mesh.vertices.foreach_get('normal', arr.ravel())
#normal.shape = (v_count, 3)
if mod:
ob.modifiers.foreach_set('show_render', show)
def get_v_nor(ob, nor_arr):
ob.data.vertices.foreach_get('normal', nor_arr.ravel())
return nor_arr
def closest_point_edge(e1, e2, p):
'''Returns the location of the point on the edge'''
vec1 = e2 - e1
vec2 = p - e1
d = np.dot(vec2, vec1) / np.dot(vec1, vec1)
cp = e1 + vec1 * d
return cp
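# Hedged example (added for illustration, not called by the addon):
# closest_point_edge() projects p onto the infinite line through e1-e2.
def _example_closest_point_edge():
    e1 = np.array([0.0, 0.0, 0.0])
    e2 = np.array([2.0, 0.0, 0.0])
    p = np.array([1.0, 1.0, 0.0])
    return closest_point_edge(e1, e2, p)  # -> array([1., 0., 0.])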
def create_vertex_groups(groups=['common', 'not_used'], weights=[0.0, 0.0], ob=None):
'''Creates vertex groups and sets weights. "groups" is a list of strings
for the names of the groups. "weights" is a list of weights corresponding
to the strings. Each vertex is assigned a weight for each vertex group to
avoid calling vertex weights that are not assigned. If the groups are
already present, the previous weights will be preserved. To reset weights
delete the created groups'''
if ob is None:
ob = bpy.context.object
vg = ob.vertex_groups
for g in range(0, len(groups)):
if groups[g] not in vg.keys(): # Don't create groups if there are already there
vg.new(groups[g])
vg[groups[g]].add(range(0,len(ob.data.vertices)), weights[g], 'REPLACE')
else:
vg[groups[g]].add(range(0,len(ob.data.vertices)), 0, 'ADD') # This way we avoid resetting the weights for existing groups.
def get_bmesh(obj=None):
ob = get_last_object()[1]
if ob is None:
ob = obj
obm = bmesh.new()
if ob.mode == 'OBJECT':
obm.from_mesh(ob.data)
elif ob.mode == 'EDIT':
obm = bmesh.from_edit_mesh(ob.data)
return obm
def get_minimal_edges(ob):
obm = get_bmesh(ob)
obm.edges.ensure_lookup_table()
obm.verts.ensure_lookup_table()
obm.faces.ensure_lookup_table()
# get sew edges:
sew = [i.index for i in obm.edges if len(i.link_faces)==0]
# so if I have a vertex with one or more sew edges attached
# I need to get the mean location of all verts shared by those edges
# every one of those verts needs to move towards the total mean
# get linear edges
e_count = len(obm.edges)
eidx = np.zeros(e_count * 2, dtype=np.int32)
e_bool = np.zeros(e_count, dtype=bool)
e_bool[sew] = True
ob.data.edges.foreach_get('vertices', eidx)
eidx.shape = (e_count, 2)
# get diagonal edges:
diag_eidx = []
start = 0
stop = 0
step_size = [len(i.verts) for i in obm.faces]
p_v_count = np.sum(step_size)
p_verts = np.ones(p_v_count, dtype=np.int32)
ob.data.polygons.foreach_get('vertices', p_verts)
# can only be understood on a good day when the coffee flows (uses rolling and slicing)
# creates unique diagonal edge sets
for f in obm.faces:
fv_count = len(f.verts)
stop += fv_count
if fv_count > 3: # triangles are already connected by linear springs
skip = 2
f_verts = p_verts[start:stop]
for fv in range(len(f_verts)):
if fv > 1: # as we go around the loop of verts in face we start overlapping
skip = fv + 1 # this lets us skip the overlap so we don't have mirror duplicates
roller = np.roll(f_verts, fv)
for r in roller[skip:-1]:
diag_eidx.append([roller[0], r])
start += fv_count
# eidx groups
sew_eidx = eidx[e_bool]
lin_eidx = eidx[~e_bool]
diag_eidx = np.array(diag_eidx)
# deal with sew verts connected to more than one edge
s_t_rav = sew_eidx.T.ravel()
s_uni, s_inv, s_counts = np.unique(s_t_rav,return_inverse=True, return_counts=True)
s_multi = s_counts > 1
multi_groups = None
if np.any(s_multi):
multi_groups = []
ls = sew_eidx[:,0]
rs = sew_eidx[:,1]
for i in s_uni[s_multi]:
gr = np.array([i])
gr = np.append(gr, ls[rs==i])
gr = np.append(gr, rs[ls==i])
multi_groups.append(gr)
return lin_eidx, diag_eidx, sew_eidx, multi_groups
def add_remove_virtual_springs(remove=False):
ob = get_last_object()[1]
cloth = get_cloth_data(ob)
obm = get_bmesh()
obm.verts.ensure_lookup_table()
count = len(obm.verts)
idxer = np.arange(count, dtype=np.int32)
sel = np.array([v.select for v in obm.verts])
selected = idxer[sel]
virtual_springs = np.array([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])
if virtual_springs.shape[0] == 0:
virtual_springs.shape = (0, 2)
if remove:
ls = virtual_springs[:, 0]
in_sel = np.in1d(ls, idxer[sel])
deleter = np.arange(ls.shape[0], dtype=np.int32)[in_sel]
for i in reversed(deleter):
ob.mclo.virtual_springs.remove(i)
return
existing = np.append(cloth.eidx, virtual_springs, axis=0)
flip = existing[:, ::-1]
existing = np.append(existing, flip, axis=0)
ls = existing[:,0]
#springs = []
for i in idxer[sel]:
# to avoid duplicates:
# where this vert occurs on the left side of the existing spring list
v_in = existing[i == ls]
v_in_r = v_in[:,1]
not_in = selected[~np.in1d(selected, v_in_r)]
idx_set = not_in[not_in != i]
for sv in idx_set:
#springs.append([i, sv])
new_vs = ob.mclo.virtual_springs.add()
new_vs.vertex_id_1 = i
new_vs.vertex_id_2 = sv
# gets appended to eidx in the cloth_init function after calling get connected polys in case geometry changes
def generate_guide_mesh():
"""Makes the arrow that appears when creating pins"""
verts = [[0.0, 0.0, 0.0], [-0.01, -0.01, 0.1], [-0.01, 0.01, 0.1], [0.01, -0.01, 0.1], [0.01, 0.01, 0.1], [-0.03, -0.03, 0.1], [-0.03, 0.03, 0.1], [0.03, 0.03, 0.1], [0.03, -0.03, 0.1], [-0.01, -0.01, 0.2], [-0.01, 0.01, 0.2], [0.01, -0.01, 0.2], [0.01, 0.01, 0.2]]
edges = [[0, 5], [5, 6], [6, 7], [7, 8], [8, 5], [1, 2], [2, 4], [4, 3], [3, 1], [5, 1], [2, 6], [4, 7], [3, 8], [9, 10], [10, 12], [12, 11], [11, 9], [3, 11], [9, 1], [2, 10], [12, 4], [6, 0], [7, 0], [8, 0]]
faces = [[0, 5, 6], [0, 6, 7], [0, 7, 8], [0, 8, 5], [1, 3, 11, 9], [1, 2, 6, 5], [2, 4, 7, 6], [4, 3, 8, 7], [3, 1, 5, 8], [12, 10, 9, 11], [4, 2, 10, 12], [3, 4, 12, 11], [2, 1, 9, 10]]
name = 'ModelingClothPinGuide'
if 'ModelingClothPinGuide' in bpy.data.objects:
mesh_ob = bpy.data.objects['ModelingClothPinGuide']
else:
mesh = bpy.data.meshes.new('ModelingClothPinGuide')
mesh.from_pydata(verts, edges, faces)
mesh.update()
mesh_ob = bpy.data.objects.new(name, mesh)
bpy.context.scene.objects.link(mesh_ob)
mesh_ob.show_x_ray = True
return mesh_ob
def create_guide():
"""Spawns the guide"""
if 'ModelingClothPinGuide' in bpy.data.objects:
mesh_ob = bpy.data.objects['ModelingClothPinGuide']
return mesh_ob
mesh_ob = generate_guide_mesh()
bpy.context.scene.objects.active = mesh_ob
bpy.ops.object.material_slot_add()
if 'ModelingClothPinGuide' in bpy.data.materials:
mat = bpy.data.materials['ModelingClothPinGuide']
else:
mat = bpy.data.materials.new(name='ModelingClothPinGuide')
mat.use_transparency = True
mat.alpha = 0.35
mat.emit = 2
mat.game_settings.alpha_blend = 'ALPHA_ANTIALIASING'
mat.diffuse_color = (1, 1, 0)
mesh_ob.material_slots[0].material = mat
return mesh_ob
def delete_guide():
"""Deletes the arrow"""
if 'ModelingClothPinGuide' in bpy.data.objects:
bpy.data.objects.remove(bpy.data.objects['ModelingClothPinGuide'])
if 'ModelingClothPinGuide' in bpy.data.meshes:
guide_mesh = bpy.data.meshes['ModelingClothPinGuide']
guide_mesh.user_clear()
bpy.data.meshes.remove(guide_mesh)
def scale_source(multiplier):
"""grow or shrink the source shape"""
ob = get_last_object()[1]
if ob is not None:
if ob.mclo.enable:
count = len(ob.data.vertices)
co = np.zeros(count*3, dtype=np.float32)
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', co)
co.shape = (count, 3)
mean = np.mean(co, axis=0)
co -= mean
co *= multiplier
co += mean
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_set('co', co.ravel())
cloth = get_cloth_data(ob)
if hasattr(cloth, 'cy_dists'):
cloth.cy_dists *= multiplier
def reset_shapes(ob=None):
"""Sets the modeling cloth key to match the source key.
Will regenerate shape keys if they are missing"""
if ob is None:
if bpy.context.object.mclo.enable:
ob = bpy.context.object
else:
ob = bpy.context.scene.mclo.last_object
if ob.data.shape_keys == None:
ob.shape_key_add('Basis')
if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth source key')
if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth key')
ob.data.shape_keys.key_blocks['modeling cloth key'].value=1
keys = ob.data.shape_keys.key_blocks
count = len(ob.data.vertices)
co = np.zeros(count * 3, dtype=np.float32)
keys['Basis'].data.foreach_get('co', co)
#co = applied_key_co(ob, None, 'modeling cloth source key')
#keys['modeling cloth source key'].data.foreach_set('co', co)
keys['modeling cloth key'].data.foreach_set('co', co)
# reset the data stored in the class
cloth = get_cloth_data(ob)
cloth.vel[:] = 0
co.shape = (co.shape[0]//3, 3)
cloth.co = co
keys['modeling cloth key'].mute = True
keys['modeling cloth key'].mute = False
def get_spring_mix(ob, eidx):
rs = []
ls = []
minrl = []
for i in eidx:
r = eidx[eidx == i[1]].shape[0]
l = eidx[eidx == i[0]].shape[0]
rs.append (min(r,l))
ls.append (min(r,l))
mix = 1 / np.array(rs + ls, dtype=np.float32) ** 1.2
return mix
def collision_data_update(self, context):
ob = self.id_data
if ob.mclo.self_collision:
create_cloth_data(ob)
def refresh_noise(self, context):
ob = self.id_data
cloth = get_cloth_data(ob)
if cloth:
zeros = np.zeros(cloth.count, dtype=np.float32)
random = np.random.random(cloth.count)
zeros[:] = random
cloth.noise = ((zeros + -0.5) * ob.mclo.noise * 0.1)[:, nax]
def generate_wind(wind_vec, ob, cloth):
"""Maintains a wind array and adds it to the cloth vel"""
tri_nor = cloth.normals # non-unit calculated by tri_normals_in_place() per each triangle
w_vec = revert_rotation(ob, wind_vec)
turb = ob.mclo.turbulence
if turb != 0:
w_vec += np.random.random(3).astype(np.float32) * turb * np.mean(w_vec) * 4
# only blow on verts facing the wind
perp = np.abs(tri_nor @ w_vec)
cloth.wind += w_vec
cloth.wind *= perp[:, nax][:, nax]
# reshape for add.at
shape = cloth.wind.shape
cloth.wind.shape = (shape[0] * 3, 3)
cloth.wind *= cloth.tri_mix
np.add.at(cloth.vel, cloth.tridex.ravel(), cloth.wind)
cloth.wind.shape = shape
def generate_inflate(ob, cloth):
"""Blow it up baby!"""
tri_nor = cloth.normals #* ob.mclo.inflate # non-unit calculated by tri_normals_in_place() per each triangle
#tri_nor /= np.einsum("ij, ij->i", tri_nor, tri_nor)[:, nax]
# reshape for add.at
shape = cloth.inflate.shape
cloth.inflate += tri_nor[:, nax] * ob.mclo.inflate# * cloth.tri_mix
cloth.inflate.shape = (shape[0] * 3, 3)
cloth.inflate *= cloth.tri_mix
np.add.at(cloth.vel, cloth.tridex.ravel(), cloth.inflate)
cloth.inflate.shape = shape
cloth.inflate *= 0
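# Hedged illustration of the np.add.at scatter used by generate_wind() and
# generate_inflate(): repeated vertex indices accumulate their contributions,
# which plain fancy-index assignment would not do.
def _example_scatter_add():
    vel = np.zeros((3, 3), dtype=np.float32)
    tridex = np.array([[0, 1, 2], [0, 2, 1]])  # vertex 0 belongs to two tris
    per_corner = np.ones((6, 3), dtype=np.float32)
    np.add.at(vel, tridex.ravel(), per_corner)
    return vel[0]  # -> array([2., 2., 2.], dtype=float32)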
def get_quat(rad, axis):
theta = (rad * 0.5)
w = np.cos(theta)
q_axis = axis * np.sin(theta)[:, nax]
return w, q_axis
def q_rotate(co, w, axis):
"""Takes an N x 3 numpy array and returns that array rotated around
the axis by the angle in radians w. (standard quaternion)"""
move1 = np.cross(axis, co)
move2 = np.cross(axis, move1)
move1 *= w[:, nax]
return co + (move1 + move2) * 2
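# Hedged sanity check (illustrative): rotating the x axis a quarter turn
# around z with get_quat()/q_rotate() should land on the y axis.
def _example_q_rotate():
    rad = np.array([np.pi * 0.5], dtype=np.float32)
    axis = np.array([[0.0, 0.0, 1.0]], dtype=np.float32)
    w, q_axis = get_quat(rad, axis)
    co = np.array([[1.0, 0.0, 0.0]], dtype=np.float32)
    return q_rotate(co, w, q_axis)  # -> approximately [[0., 1., 0.]]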
def bend_springs(cloth, co, measure=None):
bend_eidx, tips = cloth.bend_eidx, cloth.bend_tips
tips_co = co[tips]
bls, brs = bend_eidx[:,0], bend_eidx[:, 1]
b_oris = co[bls]
be_vecs = co[brs] - b_oris
te_vecs = tips_co - b_oris[:, nax]
bcp_dots = np.einsum('ij,ikj->ik', be_vecs, te_vecs)
be_dots = np.einsum('ij,ij->i', be_vecs, be_vecs)
b_div = np.nan_to_num(bcp_dots / be_dots[:, nax])
tcp = be_vecs[:, nax] * b_div[:, :, nax]
# tip vecs from cp
tcp_vecs = te_vecs - tcp
tcp_dots = np.einsum('ijk,ijk->ij',tcp_vecs, tcp_vecs)
u_tcp_vecs = tcp_vecs / np.sqrt(tcp_dots)[:, :, nax]
u_tcp_ls = u_tcp_vecs[:, 0]
u_tcp_rs = u_tcp_vecs[:, 1]
# dot of unit tri tips around axis
angle_dot = np.einsum('ij,ij->i', u_tcp_ls, u_tcp_rs)
#paralell = angle_dot < -.9999999
angle = np.arccos(np.clip(angle_dot, -1, 1))  # clip because values outside [-1, 1] make arccos return nan
#angle = np.arccos(angle_dot) # values outside and arccos gives nan
# get the angle sign
tcp_cross = np.cross(u_tcp_vecs[:, 0], u_tcp_vecs[:, 1])
sign = np.sign(np.einsum('ij,ij->i', be_vecs, tcp_cross))
if measure is None:
s = np.arccos(angle_dot)
s *= sign
s[angle_dot < -.9999999] = np.pi
return s
angle *= sign
# rotate edges with quaternypoos
u_be_vecs = be_vecs / np.sqrt(be_dots)[:, nax]
b_dif = angle - measure
l_ws, l_axes = get_quat(b_dif, u_be_vecs)
r_ws, r_axes = l_ws, -l_axes
# move tcp vecs so their origin is in the middle:
#u_tcp_vecs *= 0.5
# should I rotate the unit vecs or the source?
# rotating the unit vecs here.
#stiff = cloth.ob.modeling_cloth_bend_stiff * 0.0057
stiff = cloth.ob.mclo.bend_stiff * 0.0057
rot_ls = q_rotate(u_tcp_ls, l_ws, l_axes)
l_force = (rot_ls - u_tcp_ls) * stiff
rot_rs = q_rotate(u_tcp_rs, r_ws, r_axes)
r_force = (rot_rs - u_tcp_rs) * stiff
np.add.at(cloth.co, tips[:, 0], l_force)
np.add.at(cloth.co, tips[:, 1], r_force)
np.subtract.at(cloth.co, bend_eidx.ravel(), np.tile(r_force * .5, 2).reshape(r_force.shape[0] * 2, 3))
np.subtract.at(cloth.co, bend_eidx.ravel(), np.tile(l_force * .5, 2).reshape(l_force.shape[0] * 2, 3))
return
cloth.co[tips[:, 0]] += l_force
cloth.co[tips[:, 1]] += r_force
#cloth.co[bend_eidx] -= l_force
cloth.co[bend_eidx] -= r_force[:, nax]
cloth.co[bend_eidx] -= l_force[:, nax]
#cloth.co[brs] -= r_force
#print("bend here")
# will need to read bend springs continuously when using
# a dynamic source shape. Guess I should do that now...
# need the angle at each edge
# need to get the tips of each tri around each edge
# should be a pair everywhere there is a link face in
# the tri bmesh
"""
With no sign I just get the dot in radians.
Rotation should move towards the shortest distance
to the same dot in radians.
Without getting the sign at all, it will always rotate
in the same direction to go back to the target.
By multiplying the dif by the sign, I can make it spin
the other way to go back to the target dot in rads
"""
# sewing functions ---------------->>>
def create_sew_edges():
bpy.ops.mesh.bridge_edge_loops()
bpy.ops.mesh.delete(type='ONLY_FACE')
return
#highlight a sew edge
#compare vertex counts
#subdivide to match counts
#distribute and smooth back into mesh
#create sew lines
# sewing functions ---------------->>>
def check_and_get_pins_and_hooks(ob):
scene = bpy.context.scene
pins = []
hooks = []
cull_ids = []
for i, pin in enumerate(ob.mclo.pins):
# Check if hook object still exists
if not pin.hook or (pin.hook and not scene.objects.get(pin.hook.name)):
cull_ids.append(i)
else:
#vert = ob.data.vertices[pin.vertex_id]
pins.append(pin.vertex_id)
hooks.append(pin.hook)
# Delete missing hooks pointers
for i in reversed(cull_ids):
pin = ob.mclo.pins[i]
if pin.hook:
bpy.data.objects.remove(pin.hook)
ob.mclo.pins.remove(i)
return pins, hooks
class ClothData:
pass
def create_cloth_data(ob):
"""Creates instance of cloth object with attributes needed for engine"""
scene = bpy.context.scene
data = scene.modeling_cloth_data_set
# Try to get the cloth data first
try:
cloth = data[ob.name]
except:
# Search for possible name changes
cloth = None
for ob_name, c in data.items():
if c.ob == ob:
# Rename the key
data[ob.name] = data.pop(ob_name)
cloth = data[ob.name]
break
# If cloth still not found
if not cloth:
cloth = ClothData()
data[ob.name] = cloth
cloth.ob = ob
# get proxy object
#proxy = ob.to_mesh(bpy.context.scene, False, 'PREVIEW')
# ----------------
scene.objects.active = ob
cloth.idxer = np.arange(len(ob.data.vertices), dtype=np.int32)
# data only accessible through object mode
mode = ob.mode
if mode == 'EDIT':
bpy.ops.object.mode_set(mode='OBJECT')
# data is read from a source shape and written to the display shape so we can change the target springs by changing the source shape
#cloth.name = ob.name
if ob.data.shape_keys == None:
ob.shape_key_add('Basis')
if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth source key')
if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth key')
ob.data.shape_keys.key_blocks['modeling cloth key'].value=1
cloth.count = len(ob.data.vertices)
# we can set a large group's pin state using the vertex group. No hooks are used here
if 'modeling_cloth_pin' not in ob.vertex_groups:
cloth.pin_group = create_vertex_groups(groups=['modeling_cloth_pin'], weights=[0.0], ob=None)
for i in range(cloth.count):
try:
ob.vertex_groups['modeling_cloth_pin'].weight(i)
except RuntimeError:
# assign a weight of zero
ob.vertex_groups['modeling_cloth_pin'].add(range(0,len(ob.data.vertices)), 0.0, 'REPLACE')
cloth.pin_bool = ~np.array([ob.vertex_groups['modeling_cloth_pin'].weight(i) for i in range(cloth.count)], dtype=bool)
# unique edges------------>>>
uni_edges = get_minimal_edges(ob)
if len(uni_edges[1]) > 0:
cloth.eidx = np.append(uni_edges[0], uni_edges[1], axis=0)
else:
cloth.eidx = uni_edges[0]
#cloth.eidx = uni_edges[0][0]
if len(ob.mclo.virtual_springs) > 0:
virtual_springs = np.array([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])
cloth.eidx = np.append(cloth.eidx, virtual_springs, axis=0)
cloth.eidx_tiler = cloth.eidx.T.ravel()
mixology = get_spring_mix(ob, cloth.eidx)
#eidx1 = np.copy(cloth.eidx)
cloth.pindexer = np.arange(cloth.count, dtype=np.int32)[cloth.pin_bool]
cloth.unpinned = np.in1d(cloth.eidx_tiler, cloth.pindexer)
cloth.eidx_tiler = cloth.eidx_tiler[cloth.unpinned]
cloth.sew_edges = uni_edges[2]
cloth.multi_sew = uni_edges[3]
# unique edges------------>>>
#cloth.pcount = pindexer.shape[0]
cloth.sco = np.zeros(cloth.count * 3, dtype=np.float32)
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', cloth.sco)
cloth.sco.shape = (cloth.count, 3)
cloth.co = np.zeros(cloth.count * 3, dtype=np.float32)
ob.data.shape_keys.key_blocks['modeling cloth key'].data.foreach_get('co', cloth.co)
cloth.co.shape = (cloth.count, 3)
co = cloth.co
cloth.vel = np.zeros(cloth.count * 3, dtype=np.float32)
cloth.vel.shape = (cloth.count, 3)
cloth.vel_start = np.zeros(cloth.count * 3, dtype=np.float32)
cloth.vel_start.shape = (cloth.count, 3)
cloth.self_col_vel = np.copy(co)
cloth.v_normals = np.zeros(co.shape, dtype=np.float32)
#get_v_normals(ob, cloth.v_normals, proxy)
#noise---
noise_zeros = np.zeros(cloth.count, dtype=np.float32)
random = np.random.random(cloth.count).astype(np.float32)
noise_zeros[:] = random
cloth.noise = ((noise_zeros + -0.5) * ob.mclo.noise * 0.1)[:, nax]
#cloth.waiting = False
#cloth.clicked = False # for the grab tool
# this helps with extra springs behaving as if they had more mass---->>>
cloth.mix = mixology[cloth.unpinned][:, nax]
# -------------->>>
# new self collisions:
cloth.tridex = triangulate(ob.data, cloth)
cloth.tridexer = np.arange(cloth.tridex.shape[0], dtype=np.int32)
cloth.tri_co = cloth.co[cloth.tridex]
tri_normals_in_place(cloth, cloth.tri_co) # non-unit normals
# -------------->>>
tri_uni, tri_inv, tri_counts = np.unique(cloth.tridex, return_inverse=True, return_counts=True)
cloth.tri_mix = (1 / tri_counts[tri_inv])[:, nax]
cloth.wind = np.zeros(cloth.tri_co.shape, dtype=np.float32)
cloth.inflate = np.zeros(cloth.tri_co.shape, dtype=np.float32)
bpy.ops.object.mode_set(mode=mode)
# for use with a static source shape:
cloth.source_angles = bend_springs(cloth, cloth.sco, None)
svecs = cloth.sco[cloth.eidx[:, 1]] - cloth.sco[cloth.eidx[:, 0]]
cloth.sdots = np.einsum('ij,ij->i', svecs, svecs)
# for doing static cling
# cloth.col_idx = np.array([], dtype=np.int32)
# cloth.re_col = np.empty((0,3), dtype=np.float32)
print('INFO: Cloth data for', ob.name, 'is created!')
return cloth
def run_handler(ob, cloth):
T = time.time()
scene = bpy.context.scene
extra_data = scene.modeling_cloth_data_set_extra
col_data = scene.modeling_cloth_data_set_colliders
if not ob.mclo.waiting and ob.mode != 'OBJECT':
ob.mclo.waiting = True
if ob.mclo.waiting:
if ob.mode == 'OBJECT':
create_cloth_data(ob)
ob.mclo.waiting = False
if not ob.mclo.waiting:
eidx = cloth.eidx # world's most important variable
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', cloth.sco.ravel())
sco = cloth.sco
co = cloth.co
co[cloth.pindexer] += cloth.noise[cloth.pindexer]
#co += cloth.noise
cloth.noise *= ob.mclo.noise_decay
# mix in vel before collisions and sewing
co[cloth.pindexer] += cloth.vel[cloth.pindexer]
cloth.vel_start[:] = co
# measure source -------------------------->>>
dynamic = True # can store for speedup if source shape is static
# bend spring calculations:
if ob.mclo.bend_stiff != 0:
# measure bend source if using dynamic source:
source_angles = cloth.source_angles
if dynamic:
source_angles = bend_springs(cloth, sco, None)
# linear spring measure
sdots = cloth.sdots
if dynamic:
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', sco.ravel())
svecs = sco[eidx[:, 1]] - sco[eidx[:, 0]]
sdots = np.einsum('ij,ij->i', svecs, svecs)
# ----------------------------------------->>>
force = ob.mclo.spring_force
mix = cloth.mix * force
pin_list = []
if len(ob.mclo.pins) > 0:
pin_list, hook_list = check_and_get_pins_and_hooks(ob)
hook_co = np.array([ob.matrix_world.inverted() * hook.matrix_world.to_translation()
for hook in hook_list])
ers = eidx[:, 1]
els = eidx[:, 0]
for x in range(ob.mclo.iterations):
# bend spring calculations:
if ob.mclo.bend_stiff != 0:
bend_springs(cloth, co, source_angles)
# add pull
vecs = co[eidx[:, 1]] - co[eidx[:, 0]]
dots = np.einsum('ij,ij->i', vecs, vecs)
div = np.nan_to_num(sdots / dots)
swap = vecs * np.sqrt(div)[:, nax]
move = vecs - swap
# pull separate test--->>>
push = ob.mclo.push_springs
if push == 0:
move[div > 1] = 0
else:
move[div > 1] *= push
# pull only test--->>>
tiled_move = np.append(move, -move, axis=0)[cloth.unpinned] * mix # * mix for stability: force multiplied by 1/number of springs
np.add.at(cloth.co, cloth.eidx_tiler, tiled_move)
# for doing static cling
# cloth.co[cloth.col_idx] = cloth.re_col
cloth.co[~cloth.pin_bool] = cloth.vel_start[~cloth.pin_bool]
if pin_list:
cloth.co[pin_list] = hook_co
# grab inside spring iterations
if ob.mclo.clicked: # for the grab tool
cloth.co[extra_data['vidx']] = np.array(extra_data['stored_vidx']) + np.array(extra_data['move'])
# refresh normals for inflate wind and self collisions
cloth.tri_co = cloth.co[cloth.tridex]
tri_normals_in_place(cloth, cloth.tri_co) # unit normals
# add effects of velocity and Gravity to the vel array for later
spring_dif = cloth.co - cloth.vel_start
#if ob.mclo.bend_stiff > 0:
#for
# non-unit normals might be better for inflate and wind because
# their strength is affected by the area, as it should be
#place after wind and inflate unless those are added to vel after collisions
# get proxy object
#proxy = ob.to_mesh(bpy.context.scene, False, 'PREVIEW')
#proxy = ob.data
#get_v_normals(ob, cloth.v_normals, proxy)
# gravity
grav = ob.mclo.gravity * 0.01# / ob.mclo.iterations)
if grav != 0:
cloth.vel += revert_rotation(ob, np.array([0, 0, grav])) / np.array(ob.scale)
# can cheat here:
#spring_mean = np.mean(spring_dif, axis=0)
#cloth.vel += spring_mean * 20
# inextensible calc:
cloth.vel += spring_dif * 2
# The amount of drag increases with speed.
# have to convert it to a range between 0 and 1
#squared_move_dist = np.sqrt(np.einsum("ij, ij->i", cloth.vel, cloth.vel))
squared_move_dist = np.einsum("ij, ij->i", cloth.vel, cloth.vel)
squared_move_dist += 1
cloth.vel *= (1 / (squared_move_dist / ob.mclo.velocity))[:, nax]
#cloth.vel *= ob.mclo.velocity
# wind:
x = ob.mclo.wind_x
y = ob.mclo.wind_y
z = ob.mclo.wind_z
wind_vec = np.array([x,y,z])
check_wind = wind_vec != 0
if np.any(check_wind):
generate_wind(wind_vec, ob, cloth)
# inflate
inflate = ob.mclo.inflate
if inflate != 0:
generate_inflate(ob, cloth)
#cloth.v_normals *= inflate
#cloth.vel += cloth.v_normals
if ob.mclo.sew != 0:
if len(cloth.sew_edges) > 0:
sew_edges = cloth.sew_edges
rs = co[sew_edges[:,1]]
ls = co[sew_edges[:,0]]
sew_vecs = (rs - ls) * 0.5 * ob.mclo.sew
co[sew_edges[:,1]] -= sew_vecs
co[sew_edges[:,0]] += sew_vecs
# for sew verts with more than one sew edge
if cloth.multi_sew is not None:
for sg in cloth.multi_sew:
cosg = co[sg]
meanie = np.mean(cosg, axis=0)
sg_vecs = meanie - cosg
co[sg] += sg_vecs * ob.mclo.sew
# !!!!! need to try adding in the velocity before doing the collision stuff
# !!!!! so vel would be added here after wind and inflate but before collision
# floor ---
if ob.mclo.floor:
floored = cloth.co[:,2] < 0
cloth.vel[:,2][floored] *= -1
cloth.vel[floored] *= .1
cloth.co[:, 2][floored] = 0
# floor ---
# objects ---
#T = time.time()
if ob.mclo.object_collision_detect:
if ob.mclo.self_collision:
self_collide(ob)
cull_ids = []
for i, cp in enumerate(scene.mclo.collider_pointers):
# Check if the object still exists
if not cp.ob or (cp.ob and not scene.objects.get(cp.ob.name)):
cull_ids.append(i)
continue
#if cp.ob == ob:
# self_collide(ob)
if cp.ob != ob:
object_collide(ob, cp.ob)
# Remove pointers whose collider object is missing
for i in reversed(cull_ids):
o = scene.mclo.collider_pointers[i].ob
if o:
o.mclo.object_collision = False
else:
scene.mclo.collider_pointers.remove(i)
#print(time.time()-T, "the whole enchalada")
# objects ---
cloth.co[~cloth.pin_bool] = cloth.vel_start[~cloth.pin_bool]
if pin_list:
cloth.co[pin_list] = hook_co
cloth.vel[pin_list] = 0
if ob.mclo.clicked: # for the grab tool
cloth.co[extra_data['vidx']] = np.array(extra_data['stored_vidx']) + np.array(extra_data['move'])
ob.data.shape_keys.key_blocks['modeling cloth key'].data.foreach_set('co', cloth.co.ravel())
ob.data.shape_keys.key_blocks['modeling cloth key'].mute = True
ob.data.shape_keys.key_blocks['modeling cloth key'].mute = False
# remove proxy
#proxy.user_clear()
#bpy.data.meshes.remove(proxy)
#del(proxy)
#print(time.time()-T, "the entire handler time")
# +++++++++++++ object collisions ++++++++++++++
def bounds_check(co1, co2, fudge):
"""Returns True if object bounding boxes intersect.
Have to add the fudge factor for collision margins"""
check = False
co1_max = None # will never return None if check is true
co1_min = np.min(co1, axis=0)
co2_max = np.max(co2, axis=0)
if np.all(co2_max + fudge > co1_min):
co1_max = np.max(co1, axis=0)
co2_min = np.min(co2, axis=0)
if np.all(co1_max > co2_min - fudge):
check = True
return check, co1_min, co1_max # might as well reuse the checks
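# Hedged illustration: two unit boxes offset by 1.5 on every axis overlap
# only once the fudge (collision margin) exceeds the 0.5 gap between them.
def _example_bounds_check():
    a = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    b = a + 1.5
    hit, _, _ = bounds_check(a, b, 0.6)
    miss, _, _ = bounds_check(a, b, 0.4)
    return hit, miss  # -> (True, False)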
def triangle_bounds_check(tri_co, co_min, co_max, idxer, fudge):
"""Returns a bool aray indexing the triangles that
intersect the bounds of the object"""
# min check cull step 1
tri_min = np.min(tri_co, axis=1) - fudge
check_min = co_max > tri_min
in_min = np.all(check_min, axis=1)
# max check cull step 2
idx = idxer[in_min]
tri_max = np.max(tri_co[in_min], axis=1) + fudge
check_max = tri_max > co_min
in_max = np.all(check_max, axis=1)
in_min[idx[~in_max]] = False
return in_min, tri_min[in_min], tri_max[in_max] # can reuse the min and max
def tri_back_check(co, tri_min, tri_max, idxer, fudge):
"""Returns a bool aray indexing the vertices that
intersect the bounds of the culled triangles"""
# min check cull step 1
tb_min = np.min(tri_min, axis=0) - fudge
check_min = co > tb_min
in_min = np.all(check_min, axis=1)
idx = idxer[in_min]
# max check cull step 2
tb_max = np.max(tri_max, axis=0) + fudge
check_max = co[in_min] < tb_max
in_max = np.all(check_max, axis=1)
in_min[idx[~in_max]] = False
return in_min
# -------------------------------------------------------
# -------------------------------------------------------
def zxy_grid(co_y, tymin, tymax, subs, c, t, c_peat, t_peat):
# create a linspace grid between the bottom and top of the tri y range
#subs = 7
t_min = np.min(tymin)
t_max = np.max(tymax)
divs = np.linspace(t_min, t_max, num=subs, dtype=np.float32)
# figure out which triangles and which co are in each section
co_bools = (co_y > divs[:-1][:, nax]) & (co_y < divs[1:][:, nax])
tri_bools = (tymin < divs[1:][:, nax]) & (tymax > divs[:-1][:, nax])
for i, j in zip(co_bools, tri_bools):
if (np.sum(i) > 0) & (np.sum(j) > 0):
c3 = c[i]
t3 = t[j]
c_peat.append(np.repeat(c3, t3.shape[0]))
t_peat.append(np.tile(t3, c3.shape[0]))
def zx_grid(co_x, txmin, txmax, subs, c, t, c_peat, t_peat, co_y, tymin, tymax):
# create a linspace grid between the bottom and top of the tri x range
#subs = 7
t_min = np.min(txmin)
t_max = np.max(txmax)
divs = np.linspace(t_min, t_max, num=subs, dtype=np.float32)
# figure out which triangles and which co are in each section
co_bools = (co_x > divs[:-1][:, nax]) & (co_x < divs[1:][:, nax])
tri_bools = (txmin < divs[1:][:, nax]) & (txmax > divs[:-1][:, nax])
for i, j in zip(co_bools, tri_bools):
if (np.sum(i) > 0) & (np.sum(j) > 0):
c2 = c[i]
t2 = t[j]
zxy_grid(co_y[i], tymin[j], tymax[j], subs, c2, t2, c_peat, t_peat)
def z_grid(co_z, tzmin, tzmax, subs, co_x, txmin, txmax, co_y, tymin, tymax):
# create a linspace grid between the bottom and top of the tri z range
#subs = 7
t_min = np.min(tzmin)
t_max = np.max(tzmax)
divs = np.linspace(t_min, t_max, num=subs, dtype=np.float32)
# figure out which triangles and which co are in each section
co_bools = (co_z > divs[:-1][:, nax]) & (co_z < divs[1:][:, nax])
tri_bools = (tzmin < divs[1:][:, nax]) & (tzmax > divs[:-1][:, nax])
c_ranger = np.arange(co_bools.shape[1])
t_ranger = np.arange(tri_bools.shape[1])
c_peat = []
t_peat = []
for i, j in zip(co_bools, tri_bools):
if (np.sum(i) > 0) & (np.sum(j) > 0):
c = c_ranger[i]
t = t_ranger[j]
zx_grid(co_x[i], txmin[j], txmax[j], subs, c, t, c_peat, t_peat, co_y[i], tymin[j], tymax[j])
if (len(c_peat) == 0) | (len(t_peat) == 0):
return None, None
return np.hstack(c_peat), np.hstack(t_peat)
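# Hedged miniature of the slab binning that z_grid()/zx_grid()/zxy_grid()
# perform: points (and triangle extents) are bucketed into linspace slabs so
# only co-resident point/triangle pairs reach the N-squared checks below.
def _example_slab_binning():
    co_z = np.array([0.1, 0.5, 0.9], dtype=np.float32)
    divs = np.linspace(0.0, 1.0, num=3, dtype=np.float32)  # two slabs
    co_bools = (co_z > divs[:-1][:, nax]) & (co_z < divs[1:][:, nax])
    # one row per slab: [[True, False, False], [False, False, True]]
    # (boundary points fall in neither slab, as in the real code)
    return co_bools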
# -------------------------------------------------------
# -------------------------------------------------------
"""Combined with numexpr the first check min and max is faster
Combined without numexpr is slower. It's better to separate min and max"""
def v_per_tri(co, tri_min, tri_max, idxer, tridexer, c_peat=None, t_peat=None):
"""Checks each point against the bounding box of each triangle"""
co_x, co_y, co_z = co[:, 0], co[:, 1], co[:, 2]
subs = 7
#subs = bpy.data.objects['Plane.002'].mclo.grid_size
c_peat, t_peat = z_grid(co_z, tri_min[:, 2], tri_max[:, 2], subs, co_x, tri_min[:, 0], tri_max[:, 0], co_y, tri_min[:, 1], tri_max[:, 1])
if c_peat is None:
return
# X
# Step 1 check x_min (because we're N squared here we break it into steps)
check_x_min = co_x[c_peat] > tri_min[:, 0][t_peat]
c_peat = c_peat[check_x_min]
if c_peat.shape[0] == 0:
return
t_peat = t_peat[check_x_min]
# Step 2 check x max
check_x_max = co_x[c_peat] < tri_max[:, 0][t_peat]
c_peat = c_peat[check_x_max]
if c_peat.shape[0] == 0:
return
t_peat = t_peat[check_x_max]
# Y
# Step 3 check y min
check_y_min = co_y[c_peat] > tri_min[:, 1][t_peat]
c_peat = c_peat[check_y_min]
if c_peat.shape[0] == 0:
return
t_peat = t_peat[check_y_min]
# Step 4 check y max
check_y_max = co_y[c_peat] < tri_max[:, 1][t_peat]
c_peat = c_peat[check_y_max]
if c_peat.shape[0] == 0:
return
t_peat = t_peat[check_y_max]
# Z
# Step 5 check z min
check_z_min = co_z[c_peat] > tri_min[:, 2][t_peat]
c_peat = c_peat[check_z_min]
if c_peat.shape[0] == 0:
return
t_peat = t_peat[check_z_min]
# Step 6 check z max
check_z_max = co_z[c_peat] < tri_max[:, 2][t_peat]
c_peat = c_peat[check_z_max]
if c_peat.shape[0] == 0:
return
t_peat = t_peat[check_z_max]
return idxer[c_peat], t_peat
#return c_peat, t_peat
def inside_triangles(tri_vecs, v2, co, tri_co_2, cidx, tidx, nor, ori, in_margin, offset=None):
idxer = np.arange(in_margin.shape[0], dtype=np.int32)[in_margin]
r_co = co[cidx[in_margin]]
r_tri = tri_co_2[tidx[in_margin]]
v0 = tri_vecs[:,0]
v1 = tri_vecs[:,1]
d00_d11 = np.einsum('ijk,ijk->ij', tri_vecs, tri_vecs)
d00 = d00_d11[:,0]
d11 = d00_d11[:,1]
d01 = np.einsum('ij,ij->i', v0, v1)
d02 = np.einsum('ij,ij->i', v0, v2)
d12 = np.einsum('ij,ij->i', v1, v2)
div = 1 / (d00 * d11 - d01 * d01)
u = (d11 * d02 - d01 * d12) * div
v = (d00 * d12 - d01 * d02) * div
# !!! Watch out for this number. It could affect speed !!!
if offset:
check = (u > -offset) & (v > -offset) & (u + v < offset + 1)
else:
check = (u > 0) & (v > 0) & (u + v < 1)
in_margin[idxer] = check
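# Hedged standalone version of the barycentric test inside_triangles() runs:
# u and v are coordinates along the two edge vectors of the triangle, and the
# point lies inside when u > 0, v > 0 and u + v < 1.
def _example_barycentric():
    tri = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    p = np.array([0.25, 0.25, 0.0])
    v0, v1 = tri[1] - tri[0], tri[2] - tri[0]
    v2 = p - tri[0]
    d00, d11, d01 = v0 @ v0, v1 @ v1, v0 @ v1
    d02, d12 = v0 @ v2, v1 @ v2
    div = 1 / (d00 * d11 - d01 * d01)
    u = (d11 * d02 - d01 * d12) * div
    v = (d00 * d12 - d01 * d02) * div
    return (u > 0) & (v > 0) & (u + v < 1)  # -> True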
def object_collide(cloth_ob, col_ob):
cloth = get_cloth_data(cloth_ob)
col = get_collider_data(col_ob)
# for doing static cling
# cloth.col_idx = np.array([], dtype=np.int32)
# cloth.re_col = np.empty((0,3), dtype=np.float32)
proxy = col_ob.to_mesh(bpy.context.scene, True, 'PREVIEW')
# Recreate collider data if number of vertices is changing
if col.co.shape[0] != len(proxy.vertices):
col = create_collider_data(col_ob)
proxy_in_place(col, proxy)
apply_in_place(cloth_ob, cloth.co, cloth)
inner_margin = col_ob.mclo.object_collision_inner_margin
outer_margin = col_ob.mclo.object_collision_outer_margin
fudge = max(inner_margin, outer_margin)
# check object bounds: (need inner and out margins to adjust box size)
box_check, co1_min, co1_max = bounds_check(cloth.co, col.co, fudge)
# check for triangles inside the cloth bounds
#anim = col_ob.mclo.collision_animated
if box_check:
proxy_v_normals_in_place(col, True, proxy)
tri_co = col.co[col.tridex]
tri_vo = col.vel[col.tridex]
tris_in, tri_min, tri_max = triangle_bounds_check(tri_co, co1_min, co1_max, col.tridexer, fudge)#, object.ob.dimensions)
# check for verts in the bounds around the culled triangles
if np.any(tris_in):
tri_co_2 = tri_co[tris_in]
back_check = tri_back_check(cloth.co, tri_min, tri_max, cloth.idxer, fudge)
# begin every vertex co against every tri
if np.any(back_check):
v_tris = v_per_tri(cloth.co[back_check], tri_min, tri_max, cloth.idxer[back_check], col.tridexer[tris_in])
if v_tris is not None:
# update the normals. cross_vecs used by barycentric tri check
# move the surface along the vertex normals by the outer margin distance
marginalized = (col.co + col.v_normals * outer_margin)[col.tridex]
tri_normals_in_place(col, marginalized)
# add normals to make extruded tris
u_norms = col.normals[tris_in]
#u_norms = norms_2 / np.sqrt(np.einsum('ij, ij->i', norms_2, norms_2))[:, nax]
cidx, tidx = v_tris
ori = col.origins[tris_in][tidx]
nor = u_norms[tidx]
vec2 = cloth.co[cidx] - ori
d = np.einsum('ij, ij->i', nor, vec2) # nor is unit norms
in_margin = (d > -(inner_margin + outer_margin)) & (d < 0)  # d < 0 rather than d < outer_margin because the surface was already offset by the outer margin
# <<<--- Inside triangle check --->>>
# will overwrite in_margin:
cross_2 = col.cross_vecs[tris_in][tidx][in_margin]
inside_triangles(cross_2, vec2[in_margin], cloth.co, marginalized[tris_in], cidx, tidx, nor, ori, in_margin)
if np.any(in_margin):
# collision response --------------------------->>>
#if anim:
t_in = tidx[in_margin]
tri_vo = tri_vo[tris_in]
tri_vel1 = np.mean(tri_co_2[t_in], axis=1)
tri_vel2 = np.mean(tri_vo[t_in], axis=1)
tvel = tri_vel1 - tri_vel2
col_idx = cidx[in_margin]
cloth.co[col_idx] -= nor[in_margin] * (d[in_margin])[:, nax]
cloth.vel[col_idx] = tvel
# for doing static cling
# cloth.re_col = np.copy(cloth.co[col_idx])
# cloth.col_idx = col_idx
col.vel[:] = col.co
revert_in_place(cloth_ob, cloth.co)
#temp_ob = bpy.data.objects.new('__TEMP', proxy)
#for key in proxy.shape_keys.key_blocks:
# temp_ob.shape_key_remove(key)
#bpy.data.objects.remove(temp_ob)
bpy.data.meshes.remove(proxy)
# self collider =============================================
def self_collide(ob):
cloth = get_cloth_data(ob)
margin = ob.mclo.object_collision_outer_margin
tri_co = cloth.tri_co
tri_min = np.min(tri_co, axis=1) - margin
tri_max = np.max(tri_co, axis=1) + margin
# begin every vertex co against every tri
v_tris = v_per_tri(cloth.co, tri_min, tri_max, cloth.idxer, cloth.tridexer)
if v_tris is not None:
cidx, tidx = v_tris
u_norms = cloth.normals
# don't check faces the verts are part of
check_neighbors = cidx[:, nax] == cloth.tridex[tidx]
cull = np.any(check_neighbors, axis=1)
cidx, tidx = cidx[~cull], tidx[~cull]
ori = cloth.origins[tidx]
nor = u_norms[tidx]
vec2 = cloth.co[cidx] - ori
d = np.einsum('ij, ij->i', nor, vec2) # nor is unit norms
in_margin = (d > -margin) & (d < margin)
# <<<--- Inside triangle check --->>>
# will overwrite in_margin:
cross_2 = cloth.cross_vecs[tidx][in_margin]
inside_triangles(cross_2, vec2[in_margin], cloth.co, tri_co, cidx, tidx, nor, ori, in_margin, offset=0.0)
if np.any(in_margin):
# collision response --------------------------->>>
t_in = tidx[in_margin]
#tri_vel1 = np.mean(tri_co[t_in], axis=1)
#tvel = np.mean(tri_vo[t_in], axis=1)
#tvel = tri_vel1 - tri_vel2
t_vel = np.mean(cloth.vel[cloth.tridex][t_in], axis=1)
col_idx = cidx[in_margin]
d_in = d[in_margin]
sign_margin = margin * np.sign(d_in) # which side of the face
c_move = ((nor[in_margin] * d_in[:, nax]) - (nor[in_margin] * sign_margin[:, nax]))#) * -np.sign(d[in_margin])[:, nax]
#c_move *= 1 / cloth.ob.modeling_cloth_grid_size
#cloth.co[col_idx] -= ((nor[in_margin] * d_in[:, nax]) - (nor[in_margin] * sign_margin[:, nax]))#) * -np.sign(d[in_margin])[:, nax]
cloth.co[col_idx] -= c_move #* .7
#cloth.vel[col_idx] = 0
cloth.vel[col_idx] = t_vel
#col.vel[:] = col.co
# self collider =============================================
# update functions --------------------->>>
def tile_and_remove_neighbors(vidx, tidx, c_peat, t_peat):
tshape = tidx.shape[0]
vshape = vidx.shape[0]
# eliminate tris that contain the point:
# check the speed difference of doing a reshape with ravel at the end
co_tidex = c_peat.reshape(vshape, tshape)
tri_tidex = tidx[t_peat.reshape(vshape, tshape)]
check = tri_tidex == vidx[co_tidex][:,:,nax]
cull = ~np.any(check, axis=2)
# duplicate of each tri for each vert and each vert for each tri
c_peat = c_peat[cull.ravel()]
t_peat = t_peat[cull.ravel()]
return c_peat, t_peat
class ColliderData:
pass
class SelfColliderData:
pass
def get_collider_data(ob):
col_data = bpy.context.scene.modeling_cloth_data_set_colliders
col = None
for key, c in col_data.items():
if c.ob == ob:
col = c
if not col:
col = create_collider_data(ob)
return col
def create_collider_data(ob):
col_data = bpy.context.scene.modeling_cloth_data_set_colliders
col = ColliderData()
col_data[ob.name] = col
col.ob = ob
# get proxy
proxy = ob.to_mesh(bpy.context.scene, True, 'PREVIEW')
col.co = get_proxy_co(ob, None, proxy)
col.idxer = np.arange(col.co.shape[0], dtype=np.int32)
proxy_in_place(col, proxy)
col.v_normals = proxy_v_normals(col.ob, proxy)
col.vel = np.copy(col.co)
col.tridex = triangulate(proxy)
col.tridexer = np.arange(col.tridex.shape[0], dtype=np.int32)
# cross_vecs used later by barycentric tri check
proxy_v_normals_in_place(col, True, proxy)
marginalized = np.array(col.co + col.v_normals * ob.mclo.object_collision_outer_margin, dtype=np.float32)
col.cross_vecs, col.normals, col.origins = get_tri_normals(marginalized[col.tridex])
col.cross_vecs.dtype = np.float32
col.origins.dtype = np.float32
#col.normals.dtype = np.float32
# remove proxy
bpy.data.meshes.remove(proxy)
print('INFO: Collider data for', ob.name, 'is created!')
return col
# Self collision object
def create_self_collider(ob):
# maybe fixed? !!! bug where first frame of collide uses empty data. Stuff goes flying.
col = ColliderData()
col.ob = ob
col.co = get_co(ob, None)
    proxy_in_place(col, ob.data)
    col.v_normals = proxy_v_normals(ob, ob.data)
    col.vel = np.copy(col.co)
    # completion sketch (assumed): mirror create_collider_data() for the
    # remaining attributes, reading from ob.data since no proxy is used here
    col.tridex = triangulate(ob.data)
    col.tridexer = np.arange(col.tridex.shape[0], dtype=np.int32)
    col.cross_vecs, col.normals, col.origins = get_tri_normals(col.co[col.tridex])
    return col
import numpy as np
import torch
from utils.bbox_tools import loc2bbox, bbox_iou, bbox2loc
class ProposalCreator:
"""
generate proposal RoIs by calling this class
"""
def __init__(self,
rpn_model,
nms_thresh=0.7,
n_train_pre_nms=12000, # on training mode: keep top-n1 bboxes before NMS
n_train_post_nms=2000, # on training mode: keep top-n2 bboxes after NMS
n_test_pre_nms=6000, # on test mode: keep top-n3 bboxes before NMS
n_test_post_nms=300, # on test mode: keep top-n4 bboxes after NMS
min_size=16
):
self.rpn_model = rpn_model
self.nms_thresh = nms_thresh
self.n_train_pre_nms = n_train_pre_nms
self.n_train_post_nms = n_train_post_nms
self.n_test_pre_nms = n_test_pre_nms
self.n_test_post_nms = n_test_post_nms
self.min_size = min_size
def __call__(self, loc, score, anchor, img_size, scale=1.):
"""input should be ndarray
Propose RoIs.
Inputs :obj:`loc, score, anchor` refer to the same anchor when indexed
by the same index.
On notations, :math:`R` is the total number of anchors. This is equal
to product of the height and the width of an image and the number of
anchor bases per pixel.
Type of the output is same as the inputs.
Args:
loc (array): Predicted offsets and scaling to anchors.
Its shape is :math:`(R, 4)`.
score (array): Predicted foreground probability for anchors.
Its shape is :math:`(R,)`.
anchor (array): Coordinates of anchors. Its shape is
:math:`(R, 4)`.
img_size (tuple of ints): A tuple :obj:`height, width`,
which contains image size after scaling.
scale (float): The scaling factor used to scale an image after
reading it from a file.
Returns:
array:
An array of coordinates of proposal boxes.
Its shape is :math:`(S, 4)`. :math:`S` is less than
:obj:`self.n_test_post_nms` in test time and less than
:obj:`self.n_train_post_nms` in train time. :math:`S` depends on
the size of the predicted bounding boxes and the number of
bounding boxes discarded by NMS.
"""
# NOTE: when testing, remember to call
# r_fcn.eval()
# to set self.training = False
if self.rpn_model.training:
n_pre_nms = self.n_train_pre_nms
n_post_nms = self.n_train_post_nms
else:
n_pre_nms = self.n_test_pre_nms
n_post_nms = self.n_test_post_nms
# Convert the anchors to the ROIs
rois = loc2bbox(anchor, loc)
# clip rois
rois[:, slice(0, 4, 2)] = np.clip(
rois[:, slice(0, 4, 2)], 0, img_size[0])
rois[:, slice(1, 4, 2)] = np.clip(
rois[:, slice(1, 4, 2)], 0, img_size[1])
# remove small rois
min_size = self.min_size * scale
hs = rois[:, 2] - rois[:, 0] # height
ws = rois[:, 3] - rois[:, 1] # width
keep = np.where((hs >= min_size) & (ws >= min_size))[0]
rois = rois[keep, :]
score = score[keep]
# sorted by score
# keep the top-N anchors for NMS, e.g. N=12000 (training), 6000 (testing)
order = score.ravel().argsort()[::-1]  # [::-1] reverses the argsort into descending order
if n_pre_nms > 0:
order = order[:n_pre_nms] # shape:(n_pre_nms, )
rois = rois[order, :]
score = score[order]
keep = torch.ops.torchvision.nms(
torch.from_numpy(rois).cuda(),
torch.from_numpy(score).cuda(),
self.nms_thresh
)
if n_post_nms > 0:
keep = keep[:n_post_nms]
rois = rois[keep.cpu().numpy()]
# rois_score = score[keep.cpu().numpy()]
return rois
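# Hedged usage sketch (shapes only; not part of the original file). Assumes a
# trained RPN and a CUDA device, since __call__ moves arrays to the GPU for NMS:
#
#   proposal_creator = ProposalCreator(rpn_model)
#   rois = proposal_creator(
#       loc,               # (R, 4) ndarray of predicted offsets and scales
#       score,             # (R,)  ndarray of foreground scores
#       anchor,            # (R, 4) ndarray of anchor coordinates
#       img_size=(600, 800),
#       scale=1.)
#   # rois: (S, 4) with S <= n_post_nms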
class ProposalTargetCreator(object):
"""Assign ground truth bounding boxes to given RoIs.
The :meth:`__call__` of this class generates training targets
for each object proposal.
This is used to train Faster RCNN [#]_.
.. [#] <NAME>, <NAME>, <NAME>, <NAME>. \
Faster R-CNN: Towards Real-Time Object Detection with \
Region Proposal Networks. NIPS 2015.
Args:
n_sample (int): The number of sampled regions.
pos_ratio (float): Fraction of regions that is labeled as a
foreground.
pos_iou_thresh (float): IoU threshold for a RoI to be considered as a
foreground.
neg_iou_thresh_hi (float): RoI is considered to be the background
if IoU is in
[:obj:`neg_iou_thresh_lo`, :obj:`neg_iou_thresh_hi`).
neg_iou_thresh_lo (float): See above.
"""
def __init__(self,
n_sample=128,
pos_ratio=0.25, pos_iou_thresh=0.5,
neg_iou_thresh_hi=0.5, neg_iou_thresh_lo=0.0):
self.n_sample = n_sample
self.pos_ratio = pos_ratio
self.pos_iou_thresh = pos_iou_thresh
self.neg_iou_thresh_hi = neg_iou_thresh_hi
self.neg_iou_thresh_lo = neg_iou_thresh_lo
def __call__(self, roi, bbox, label,
loc_normalize_mean=(0., 0., 0., 0.),
loc_normalize_std=(0.1, 0.1, 0.2, 0.2)):
"""Assigns ground truth to sampled proposals.
This function samples total of :obj:`self.n_sample` RoIs
from the combination of :obj:`roi` and :obj:`bbox`.
The RoIs are assigned with the ground truth class labels as well as
bounding box offsets and scales to match the ground truth bounding
boxes. As many as :obj:`pos_ratio * self.n_sample` RoIs are
sampled as foregrounds.
Offsets and scales of bounding boxes are calculated using
:func:`model.utils.bbox_tools.bbox2loc`.
Also, types of input arrays and output arrays are same.
Here are notations.
* :math:`S` is the total number of sampled RoIs, which equals \
:obj:`self.n_sample`.
* :math:`L` is number of object classes possibly including the \
background.
Args:
roi (array): Region of Interests (RoIs) from which we sample.
Its shape is :math:`(R, 4)`
bbox (array): The coordinates of ground truth bounding boxes.
Its shape is :math:`(R', 4)`.
label (array): Ground truth bounding box labels. Its shape
is :math:`(R',)`. Its range is :math:`[0, L - 1]`, where
:math:`L` is the number of foreground classes.
loc_normalize_mean (tuple of four floats): Mean values to normalize
coordinates of bounding boxes.
loc_normalize_std (tuple of four floats): Standard deviation of
the coordinates of bounding boxes.
Returns:
(array, array, array):
* **sample_roi**: Regions of interests that are sampled. \
Its shape is :math:`(S, 4)`.
* **gt_roi_loc**: Offsets and scales to match \
the sampled RoIs to the ground truth bounding boxes. \
Its shape is :math:`(S, 4)`.
* **gt_roi_label**: Labels assigned to sampled RoIs. Its shape is \
:math:`(S,)`. Its range is :math:`[0, L]`. The label with \
value 0 is the background.
"""
# get numbers of bbox
n_bbox, _ = bbox.shape
# Join GT bboxes
roi = np.concatenate((roi, bbox), axis=0)
# Preset number of positive samples
pos_roi_per_image = np.round(self.n_sample * self.pos_ratio)
# get IOU between roi and bbox
iou = bbox_iou(roi, bbox)
# argmax index of each ROI
gt_assignment = iou.argmax(axis=1)
# max IOU of each ROI
max_iou = iou.max(axis=1)
# label of each ROI, the positive label start with 1
gt_roi_label = label[gt_assignment] + 1
# positive ROIs
pos_index = np.where(max_iou >= self.pos_iou_thresh)[0]
pos_roi_per_this_image = int(min(pos_roi_per_image, pos_index.size))
if pos_index.size > 0:
pos_index = np.random.choice(
pos_index, size=pos_roi_per_this_image, replace=False
)
# negative ROIs
neg_index = np.where((max_iou < self.neg_iou_thresh_hi) &
(max_iou >= self.neg_iou_thresh_lo))[0]
# the number of negative ROIs
neg_roi_per_this_image = self.n_sample - pos_roi_per_this_image
neg_roi_per_this_image = int(min(neg_roi_per_this_image, neg_index.size))
if neg_index.size > 0:
neg_index = np.random.choice(
neg_index, size=neg_roi_per_this_image, replace=False
)
keep_index = np.append(pos_index, neg_index)
gt_roi_label = gt_roi_label[keep_index]
gt_roi_label[pos_roi_per_this_image:] = 0  # set the label of negative RoIs to zero
sample_roi = roi[keep_index]
gt_roi_loc = bbox2loc(sample_roi, bbox[gt_assignment[keep_index]])
gt_roi_loc = ((gt_roi_loc - np.array(loc_normalize_mean, np.float32)) /
np.array(loc_normalize_std, np.float32))
return sample_roi, gt_roi_loc, gt_roi_label
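# Hedged toy example (added for illustration): with one ground-truth box,
# overlapping proposals become foreground (label = class + 1) and distant
# proposals become background (label 0).
def _example_proposal_target():
    roi = np.array([[0., 0., 10., 10.], [50., 50., 60., 60.]], dtype=np.float32)
    bbox = np.array([[0., 0., 10., 10.]], dtype=np.float32)
    label = np.array([3], dtype=np.int32)
    ptc = ProposalTargetCreator(n_sample=4)
    sample_roi, gt_roi_loc, gt_roi_label = ptc(roi, bbox, label)
    return sample_roi.shape, gt_roi_label  # -> (2, 4), labels containing 4 and 0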
class AnchorTargetCreator(object):
"""
Assign the ground truth bounding boxes to anchors.
params:
n_sample the numbers of sample anchors
pos_iou_thresh float, the anchor positive if its IOU with gt_bbox > pos_iou_thresh
neg_iou_thresh float, the anchor negative if its IOU with gt_bbox < neg_iou_thresh
pos_ratio: float, n_sample_pos / n_sample
"""
def __init__(self,
n_sample=256,
pos_iou_thresh=0.7, neg_iou_thresh=0.3,
pos_ratio=0.5):
self.n_sample = n_sample
self.pos_iou_thresh = pos_iou_thresh
self.neg_iou_thresh = neg_iou_thresh
self.pos_ratio = pos_ratio
def __call__(self, gt_bbox, anchor, img_size):
"""Assign ground truth supervision to sampled subset of anchors.
Types of input arrays and output arrays are same.
Here are notations.
* :math:`S` is the number of anchors.
* :math:`R` is the number of bounding boxes.
Args:
bbox (array): Coordinates of bounding boxes. Its shape is
:math:`(R, 4)`.
anchor (array): Coordinates of anchors. Its shape is
:math:`(S, 4)`.
img_size (tuple of ints): A tuple :obj:`H, W`, which
is a tuple of height and width of an image.
Returns:
(array, array):
#NOTE: it's scale not only offset
* **loc**: Offsets and scales to match the anchors to \
the ground truth bounding boxes. Its shape is :math:`(S, 4)`.
* **label**: Labels of anchors with values \
:obj:`(1=positive, 0=negative, -1=ignore)`. Its shape \
is :math:`(S,)`.
"""
img_H, img_W = img_size
n_anchor = len(anchor)
# Get the index of anchors completely inside the image, e.g. array[0, 1, 3, 6]
inside_index = _get_inside_index(anchor, img_H, img_W)
anchor = anchor[inside_index]
# shape: (inside_anchor_num, ) & (inside_anchor_num, )
anchor_argmax_ious, anchor_label = self._create_label(anchor, gt_bbox)
# compute bounding box regression targets
anchor_loc = bbox2loc(anchor, gt_bbox[anchor_argmax_ious]) # shape:(inside_anchor_num, 4)
# map up to original set of anchors
anchor_label = _unmap(anchor_label, n_anchor, inside_index, fill=-1) # shape:(n_anchor, )
anchor_loc = _unmap(anchor_loc, n_anchor, inside_index, fill=0) # shape:(n_anchor, 4)
return anchor_loc, anchor_label
def _create_label(self, anchor, gt_bbox):
# label: 1 is positive, 0 is negative, -1 is dont care
anchor_label = np.empty((anchor.shape[0], ), dtype=np.int32) # shape:(inside_anchor_num, )
anchor_label.fill(-1)  # initialize all anchor labels to -1 (ignore)
anchor_argmax_ious, anchor_max_ious, gt_argmax_ious = self._calc_ious(anchor, gt_bbox)
'''assign labels'''
# assign negative labels first so that positive labels can clobber them
anchor_label[anchor_max_ious < self.neg_iou_thresh] = 0
# positive label: for each gt, anchor with highest iou
anchor_label[gt_argmax_ious] = 1
# positive label: above threshold IOU
anchor_label[anchor_max_ious >= self.pos_iou_thresh] = 1
# subsample positive labels if we have too many
n_pos = int(self.pos_ratio * self.n_sample)
        pos_index = np.where(anchor_label == 1)[0]
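        # --- hedged completion (not in the original, which is truncated here): ---
        # --- a minimal sketch of the standard Faster R-CNN subsampling, which ---
        # --- disables surplus positives/negatives by re-marking them as -1. ---
        if len(pos_index) > n_pos:
            disable_index = np.random.choice(
                pos_index, size=(len(pos_index) - n_pos), replace=False)
            anchor_label[disable_index] = -1
        # subsample negative labels if we have too many
        n_neg = self.n_sample - np.sum(anchor_label == 1)
        neg_index = np.where(anchor_label == 0)[0]
        if len(neg_index) > n_neg:
            disable_index = np.random.choice(
                neg_index, size=(len(neg_index) - n_neg), replace=False)
            anchor_label[disable_index] = -1
        return anchor_argmax_ious, anchor_label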
import torch
from config import cfg
import numpy as np
from utils.box_overlaps import *
from utils.anchor_utils import *
def cal_anchors():
# Output:
# Anchors: (w, l, 2, 7) x y z h w l r
x = np.linspace(cfg.SCENE_X_MIN, cfg.SCENE_X_MAX, cfg.FEATURE_WIDTH)
y = np.linspace(cfg.SCENE_Y_MIN, cfg.SCENE_Y_MAX, cfg.FEATURE_HEIGHT)
cx, cy = np.meshgrid(x, y)
# All are (w, l, 2)
    cx = np.tile(cx[..., np.newaxis], 2)
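    # --- hedged completion (not in the original, which is truncated here): ---
    # --- a sketch of the usual VoxelNet-style anchor grid; the cfg.ANCHOR_* ---
    # --- names below are assumptions, not verified config fields. ---
    cy = np.tile(cy[..., np.newaxis], 2)
    cz = np.ones_like(cx) * cfg.ANCHOR_Z  # assumed constant anchor height
    w = np.ones_like(cx) * cfg.ANCHOR_W
    l = np.ones_like(cx) * cfg.ANCHOR_L
    h = np.ones_like(cx) * cfg.ANCHOR_H
    r = np.ones_like(cx)
    r[..., 0] = 0                    # first anchor: yaw 0
    r[..., 1] = 90 / 180 * np.pi     # second anchor: yaw 90 degrees
    # stack into the (w, l, 2, 7) layout (x y z h w l r) from the docstring
    anchors = np.stack([cx, cy, cz, h, w, l, r], axis=-1)
    return anchors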
import numpy as np
from utils import load_data
from proposals import compute_iou, get_majority
class ClusterDetProcessor(object):
def __init__(self, dataset):
self.dataset = dataset
self.dtype = np.float32
def __len__(self):
return self.dataset.size
def build_graph(self, fn_node, fn_edge):
""" build graph from graph file
- nodes: NxD,
each row represents the feature of a node
- adj: NxN,
a symmetric similarity matrix with self-connection
"""
node = load_data(fn_node)
edge = load_data(fn_edge)
assert len(node) > 1, '#node of {}: {}'.format(fn_node, len(node))
# take majority as label of the graph
if not self.dataset.ignore_meta:
lb2cnt = {}
for idx in node:
if idx not in self.dataset.idx2lb:
continue
lb = self.dataset.idx2lb[idx]
if lb not in lb2cnt:
lb2cnt[lb] = 0
lb2cnt[lb] += 1
gt_lb, _ = get_majority(lb2cnt)
gt_node = self.dataset.lb2idxs[gt_lb]
iou = compute_iou(node, gt_node)
else:
iou = -1.
# compute adj
node = list(node)
abs2rel = {}
for i, n in enumerate(node):
abs2rel[n] = i
size = len(node)
adj = np.eye(size)
for e in edge:
w = 1.
if len(e) == 2:
e1, e2 = e
elif len(e) == 3:
e1, e2, dist = e
if not self.dataset.wo_weight:
w = 1. - dist
else:
raise ValueError('Unknown length of e: {}'.format(e))
v1 = abs2rel[e1]
v2 = abs2rel[e2]
adj[v1][v2] = w
adj[v2][v1] = w
if self.dataset.featureless:
vertices = adj.sum(axis=1, keepdims=True)
vertices /= vertices.sum(axis=1, keepdims=True)
else:
vertices = self.dataset.features[node, :]
if self.dataset.is_norm_adj:
adj /= adj.sum(axis=1, keepdims=True)
return vertices, adj, iou
def __getitem__(self, idx):
""" each vertices is a NxD matrix,
each adj is a NxN matrix,
each label is a Nx1 matrix,
which is a 0 or 1 representing the foreground and background
"""
        if idx is None or idx >= self.dataset.size:
raise ValueError('idx({}) is not in the range of {}'.format(idx, self.dataset.size))
fn_node, fn_edge = self.dataset.lst[idx]
ret = self.build_graph(fn_node, fn_edge)
assert ret is not None
vertices, adj, label = ret
return vertices.astype(self.dtype), \
adj.astype(self.dtype), \
               np.array(label, dtype=self.dtype)
# <NAME> 2017
# GMM implementation I made for a computer vision course during my honours degree at Wits
import numpy as np
from sklearn.mixture import GaussianMixture
from scipy.stats import multivariate_normal
# These are functions which can be run on GMMs
class fn():
def zero_init(data, K):
lambda_vect = np.full((K), 1.0/K)
        # init means randomly in (0, 1]
        # covariance must be positive semi-definite; the construction below ensures it
        # sigma_vect = np.full((K), np.var(data))  # diagonal alternative
sigma_list = []
mean_list = []
for k in range(K):
mean = (1.-0.)*np.random.random_sample((data.shape[1])) + 0.
mean_list.append(mean)
sig = (1.0-0.001)*np.random.random_sample((data.shape[1],data.shape[1])) + 0.001
sig = np.dot(sig, sig.T)
sig = np.diag(np.diag(sig))
sigma_list.append(sig)
sigma = np.array(sigma_list)
mean_vect = np.array(mean_list)
# print(mean_vect)
# print(lambda_vect)
return lambda_vect, mean_vect, sigma
def naive_bayes_classifier(data, GMM_fg, GMM_bg, prior, confidence=0.65):
# test_label[i] = np.argmax(p)#(p>confidence)
p1 = GMM_fg.probability(data)
p2 = GMM_bg.probability(data)
l1 = prior
l2 = 1 - prior
prob = np.divide(p1*l1, p1*l1 + p2*l2)
        # True if GMM_fg is more likely than the confidence threshold
        if prob > confidence:
            return True
        return False
def classifier(data, GMM_fg, GMM_bg):
# print("test")
p1 = GMM_fg.probability(data)
# print("test: ", p1)
p2 = GMM_bg.probability(data)
# print("test: ", p2)
        # True if GMM_fg is greater
        if p1 > p2:
            return True
        return False
def error(test_vector, label_vector, GMM_fg, GMM_bg):
test_label = np.zeros(test_vector.shape[0])
sum = 0
for i in range(test_vector.shape[0]):
test_label[i] = fn.classifier(test_vector.values[i], GMM_fg, GMM_bg)
if test_label[i] != label_vector[i]:
sum = sum + 1
# return np.sum(np.absolute(test_label-label_vector))/(label_vector.shape[0]*label_vector.shape[1])
return sum/label_vector.shape[0]
def bayes_error(test_vector, label_vector, GMM_fg, GMM_bg, prior, confidence=0.65):
        test_label = np.zeros(test_vector.shape[0])
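        # --- hedged completion (not in the original, which is truncated here): ---
        # --- a sketch mirroring fn.error above, using the Bayes classifier. ---
        wrong = 0
        for i in range(test_vector.shape[0]):
            test_label[i] = fn.naive_bayes_classifier(
                test_vector.values[i], GMM_fg, GMM_bg, prior, confidence)
            if test_label[i] != label_vector[i]:
                wrong = wrong + 1
        return wrong / label_vector.shape[0]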
# Covid Detection WebService
#
# $ curl -XPOST -F "[email protected]" http://127.0.0.1:5001
from flask import Flask, jsonify, request, redirect
from flask_ngrok import run_with_ngrok
import cv2
import PIL
import numpy as np
from sklearn.svm import NuSVC
import pickle
from scipy.io import loadmat
import time
# Create An SVM
FV = loadmat('features.mat')
X = FV['data']
Y = FV['labels']
# fix a bug where saving with scipy makes the matrix transposed.
Y = Y.transpose()
clf = NuSVC(nu=0.4, kernel='rbf', gamma=0.009876939713502824, shrinking=True, tol=0.00001,
max_iter=176, random_state=1, class_weight='balanced', probability=True)
clf.fit(X, Y.ravel())  # ravel to the 1-D label vector sklearn expects
# Save the SVM to be used on the endpoint.
with open('svm_model.pkl', 'wb') as f:
pickle.dump(clf, f)
# Perform all the steps at once and return a probabilty for covid.
def detect(imagePath, models):
s_time = time.time()
covid19 = False
image = imagePath
feature_extractor, clf = models
image = cv2.resize(image, (224, 224)) / 255.
    features = feature_extractor(np.array([image]))
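    # --- hedged completion (not in the original, which is truncated here): ---
    # --- a sketch assuming feature_extractor returns a (1, n_features) array ---
    # --- and that class 1 of the SVM is the covid-positive class. ---
    prob = clf.predict_proba(features)[0][1]
    covid19 = prob > 0.5
    print('inference took {:.3f}s'.format(time.time() - s_time))
    return covid19, prob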
import argparse
import os
import numpy as np
from PIL import Image, ImageFont, ImageDraw, ImageOps, ImageFilter
from tqdm import tqdm
import imageio
def get_ascii_chars():
return ''.join(chr(x) for x in range(32, 127))
def get_chinese_chars():
return open('assets/cn_charset.txt').read().strip() + ' '
def is_ascii(s):
try:
s.encode('ascii')
except UnicodeEncodeError:
return False
else:
return True
def read_gif(path):
im_list = imageio.mimread(path)
im_list = [im[:, :, :3] for im in im_list]
for i in range(1, len(im_list)):
im_list[i] = np.where(im_list[i]>0, im_list[i], im_list[i-1])
duration = Image.open(path).info['duration'] / 1000.
return im_list, duration
class Font():
def __init__(self, path, size):
self.font = ImageFont.truetype(font=path, size=size)
width, height = self.font.getsize('A')
self.patch_size = max(width, height)
self.x_offset = (self.patch_size-width)//2
self.y_offset = (self.patch_size-height)//2
def get_patches(self, chars):
size = self.patch_size
patches = np.zeros([len(chars), size, size], dtype=np.uint8)
if len(set(chars)) != len(chars):
raise Exception('Duplicate characters exist')
for i, c in enumerate(chars):
p = np.zeros([size, size], dtype=np.uint8)
p = Image.fromarray(p)
draw = ImageDraw.Draw(p)
draw.text([self.x_offset, self.y_offset], c, fill='white', font=self.font)
patches[i] = p
return patches
def get_rank(arr):
temp = np.argsort(arr)
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(arr))
return ranks
# get intensities for patches
def get_intensities(patches):
densities = np.mean(patches, (1, 2)) / 255.
intensities = get_rank(densities)
scale = 255./np.max(intensities)
intensities = intensities.astype(np.float32)
intensities *= scale
intensities = intensities.astype(np.uint8)
return intensities
# get a 256-element numpy array containing the index of characters
def get_intensity2idx(chars, intensities):
d = {}
for idx, intensity in zip(range(len(chars)), intensities):
if intensity in d:
d[intensity].append(idx)
else:
d[intensity] = [idx]
unique_intensities = []
char_idx = []
for intensity in d:
unique_intensities.append(intensity)
char_idx.append(np.random.choice(d[intensity]))
unique_intensities = np.array(unique_intensities, dtype=np.uint8)
char_idx = np.array(char_idx, dtype=np.int64)
intensity2idx = np.arange(256, dtype=np.int64)
intensity2idx = intensity2idx[:, np.newaxis] - unique_intensities[np.newaxis, :]
intensity2idx = np.abs(intensity2idx)
intensity2idx = np.argmin(intensity2idx, -1)
intensity2idx = char_idx[intensity2idx]
return intensity2idx
# convert one frame to text image
def im2text(im, patches, intensity2idx, grayscale=False):
    im = np.array(im, dtype=np.uint8)
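    # --- hedged completion (not in the original, which is truncated here): ---
    # --- a sketch: map each pixel intensity to a character patch and tile the ---
    # --- patches into one large text image; the grayscale flag is ignored here. ---
    gray = im if im.ndim == 2 else np.mean(im, axis=-1).astype(np.uint8)
    size = patches.shape[1]
    idx = intensity2idx[gray]                      # (H, W) indices into patches
    out = patches[idx]                             # (H, W, size, size)
    out = out.transpose(0, 2, 1, 3).reshape(gray.shape[0] * size, gray.shape[1] * size)
    return out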
#!/usr/bin/env python3
""" Use TT-SVD to approximate a large square SVD """
__authors__ = ['<NAME> <<EMAIL>>']
__date__ = '2020-12-16'
import numpy as np
import pitts_py
import functools
import timeit
import argparse
def timer(func):
"""measure runtime of the decorated function"""
@functools.wraps(func)
def wrapper_fun(*args, **kwargs):
wtime = timeit.default_timer()
value = func(*args, **kwargs)
wtime = timeit.default_timer() - wtime
print(func.__name__, "wtime:", wtime)
return value
return wrapper_fun
@timer
def random_square_low_rank_matrix(n, r):
sigma = np.linspace(1, 0, r, endpoint=False, dtype=np.float64)
U, _ = np.linalg.qr(np.random.rand(n,r).astype(dtype=np.float64))
    Vt, _ = np.linalg.qr(np.random.rand(n, r).astype(dtype=np.float64))
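    # --- hedged completion (not in the original, which is truncated here): ---
    # --- assemble the rank-r square matrix from its factors. ---
    return U @ np.diag(sigma) @ Vt.T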
import numpy as np
import scipy as sp
from functools import reduce
import time
import os
import sys
import tempfile
import h5py
import pyscf
from pyscf import gto, scf
from pyscf import mcscf
from pyscf.mcscf import addons
from pyscf.dmrgscf import dmrgci
from pyscf import dft
from pyscf.dft import numint
class MCPDFT:
def __init__(self, mol, mc, ref_method=None):
self.ref_method = ref_method
self.cas = mc
if mol == None:
self.mol = gto.Mole()
else:
self.mol = mol
# Check settings for DMRG
if (self.ref_method == 'DMRG'):
try:
from pyscf.dmrgscf import settings
except ImportError:
settings = lambda: None
settings.BLOCKEXE = getattr(__config__, 'dmrgscf_BLOCKEXE', None)
settings.BLOCKEXE_COMPRESS_NEVPT = \
getattr(__config__, 'dmrgscf_BLOCKEXE_COMPRESS_NEVPT', None)
settings.BLOCKSCRATCHDIR = getattr(__config__, 'dmrgscf_BLOCKSCRATCHDIR', None)
settings.BLOCKRUNTIMEDIR = getattr(__config__, 'dmrgscf_BLOCKRUNTIMEDIR', None)
settings.MPIPREFIX = getattr(__config__, 'dmrgscf_MPIPREFIX', None)
settings.BLOCKVERSION = getattr(__config__, 'dmrgscf_BLOCKVERSION', None)
if (settings.BLOCKEXE is None or settings.BLOCKSCRATCHDIR is None):
import sys
sys.stderr.write('settings.py not found for module dmrgci. Please create %s\n'
% os.path.join(os.path.dirname(__file__), 'settings.py'))
raise ImportError('settings.py not found')
#self.cas.fcisolver = dmrgci.DMRGCI(mol, maxM=2000, tol=1.e-8)
#self.cas.callback = self.cas.fcisolver.restart_scheduler_()
#if self.cas.chkfile == self.cas._scf._chkfile.name:
# # Do not delete chkfile after mcscf
# self.cas.chkfile = tempfile.mktemp(dir=settings.BLOCKSCRATCHDIR)
# if not os.path.exists(settings.BLOCKSCRATCHDIR):
# os.makedirs(settings.BLOCKSCRATCHDIR)
self.ci = mc.ci
self.C_mo = mc.mo_coeff
self.nao = mc.mo_coeff.shape[0]
self.nmo = mc.mo_coeff.shape[1]
self.nfrz = mc.frozen
self.ncore = mc.ncore
self.ncas = mc.ncas
self.nelecas = mc.nelecas
self.nocc = self.ncore + self.ncas
self.virt = self.nmo - self.nocc
self.amo = self.nmo - self.ncore - (0 if self.nfrz == None else self.nfrz)
self.write_hdf5 = False
def _print_active_space(self):
print("\n")
print("----------------------------------------------")
print(" Number of AOs: %s" % self.nao)
print(" Number of MOs: %s" % self.nmo)
print(" Number of frozen orbitals: %s" % ('0' if self.nfrz == None else self.nfrz) )
print(" Number of core orbitals: %s" % self.ncore)
print(" Number of active orbitals: %s" % self.ncas)
print(" Number of active alpha electrons: %s" % self.nelecas[0])
print(" Number of active beta electrons: %s" % self.nelecas[1])
print("----------------------------------------------")
print("\n")
def make_active_rdm1s(self, cas=None, ci=None):
if cas is None: cas = self.cas
ncas = cas.ncas
nelecas = cas.nelecas
if self.ref_method == 'MCSCF':
if ci is None: ci = self.ci
casdm1a, casdm1b = cas.fcisolver.make_rdm1s(ci, ncas, nelecas)
else: # ref_method == 'DMRG'
# first argument takes 0 for ground and 1 for excited state
casdm1a, casdm1b = cas.fcisolver.make_rdm1s(0, ncas, nelecas)
return casdm1a, casdm1b
def make_full_rdm1s(self, cas=None, ci=None):
if cas is None: cas = self.cas
if ci is None: ci = self.ci
nmo = cas.mo_coeff.shape[1]
ncore = cas.ncore
ncas = cas.ncas
nocc = ncore + ncas
nelecas = cas.nelecas
# building core part
dm1a = np.zeros((nmo,nmo))
        dm1b = np.zeros((nmo, nmo))
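        # --- hedged completion (not in the original, which is truncated here): ---
        # --- a sketch of the usual MO-basis 1-RDM assembly: doubly occupied ---
        # --- core block plus the active-space RDMs from the CAS solver. ---
        idx = np.arange(ncore)
        dm1a[idx, idx] = 1.0
        dm1b[idx, idx] = 1.0
        casdm1a, casdm1b = self.make_active_rdm1s(cas, ci)
        dm1a[ncore:nocc, ncore:nocc] = casdm1a
        dm1b[ncore:nocc, ncore:nocc] = casdm1b
        return dm1a, dm1b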
# dataloader of augmented original t-less dataset
import torch.utils.data as data
from PIL import Image
import os
import os.path
import torch
import numpy as np
import torchvision.transforms as transforms
import argparse
import time
import random
import numpy.ma as ma
import scipy.misc
import scipy.io as scio
import yaml
import json
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import open3d as o3d
proj_dir = os.getcwd()+'/'
# proj_dir = '/home/lthpc/yifeis/pose/StablePose/'
# proj_dir = '/home/dell/yifeis/pose/pose_est_tless_3d/'
class PoseDataset(data.Dataset):
def __init__(self, mode, num_pt, add_noise, root, noise_trans, refine):
if mode == 'train':
self.mode = 'train'
self.path = proj_dir + 'datasets/tless/dataset_config/bop_train_list.txt'
elif mode == 'test':
self.mode = 'test'
self.path = proj_dir + 'datasets/tless/dataset_config/bop_final_test_list.txt'
self.num_pt = num_pt
self.root = root
self.add_noise = add_noise
self.noise_trans = noise_trans
self.model_root = root
self.list = []
# self.real = []
self.syn = []
input_file = open(self.path) # open data folder(include png,depth,label mat)
while 1:
input_line = input_file.readline()
if not input_line:
break
if input_line[-1:] == '\n':
input_line = input_line[:-1]
# if input_line[:5] == 'train_primesense/':
# self.real.append(input_line)
# else:
# self.syn.append(input_line)
self.list.append(input_line)
input_file.close()
self.length = len(self.list)
# self.len_real = len(self.real)
self.len_syn = len(self.syn)
model_info_file = open('{0}/models_reconst/models_info.json'.format(self.model_root), 'r', encoding='utf-8')
self.model_info = json.load(model_info_file)
self.cld = {}
for class_id in range(1, 31):
self.cld[class_id] = []
mesh = o3d.io.read_triangle_mesh('{0}/models_reconst/obj_{1}.ply'.format(self.model_root, str(class_id).zfill(6)))
pcd = mesh.sample_points_uniformly(number_of_points=10000)
pcd = np.asarray(pcd.points)
# displayPoint(pcd, pcd,k)
self.cld[class_id] = pcd
if self.mode == 'train':
self.xmap = np.array([[j for i in range(400)] for j in range(400)])
self.ymap = np.array([[i for i in range(400)] for j in range(400)])
self.rt_list = []
self.patch_num_list = []
self.crop_size_list = []
self.gt_list = []
self.info_list = []
self.cam_list = []
N = 31
for i in range(1, N):
datadir = 'train_primesense/' + str(i).zfill(6)
info_file = open('{0}/{1}/scene_gt_info.json'.format(self.root, datadir), 'r', encoding='utf-8')
gt_file = open('{0}/{1}/scene_gt.json'.format(self.root, datadir), 'r', encoding='utf-8')
cam_file = open('{0}/{1}/scene_camera.json'.format(self.root, datadir), 'r', encoding='utf-8')
info = json.load(info_file)
gt = json.load(gt_file)
cam = json.load(cam_file)
self.info_list.append(info)
self.gt_list.append(gt)
self.cam_list.append(cam)
                print('loading training scene ' + str(i) + ' json files')
else:
self.xmap = np.array([[j for i in range(720)] for j in range(540)])
self.ymap = np.array([[i for i in range(720)] for j in range(540)])
self.gt_list = []
self.info_list = []
self.cam_list = []
self.patch_num_list = []
############# load json
for i in range(1, 21):
datadir = 'test_primesense/' + str(i).zfill(6)
info_file = open('{0}/{1}/scene_gt_info.json'.format(self.root, datadir), 'r', encoding='utf-8')
gt_file = open('{0}/{1}/scene_gt.json'.format(self.root, datadir), 'r', encoding='utf-8')
cam_file = open('{0}/{1}/scene_camera.json'.format(self.root, datadir), 'r', encoding='utf-8')
info = json.load(info_file)
gt = json.load(gt_file)
cam = json.load(cam_file)
self.info_list.append(info)
self.gt_list.append(gt)
self.cam_list.append(cam)
                print('loading testing scene ' + str(i) + ' json files')
self.trancolor = transforms.ColorJitter(0.2, 0.2, 0.2, 0.05)
self.noise_img_loc = 0.0
self.noise_img_scale = 7.0
self.minimum_num_pt = 100
self.trans = transforms.ToTensor()
self.norm1 = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self.norm2 = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
self.symmetry_obj_idx = [1-1, 2-1, 3-1, 4-1, 13-1, 14-1, 15-1, 16-1, 17-1,
24-1, 30-1,5-1, 6-1, 7-1, 8-1, 9-1, 10-1, 11-1,
12-1, 19-1, 20-1, 23-1, 25-1, 26-1, 27-1, 28-1, 29-1]
# self.rot_index = [1, 2, 3, 4, 13, 14, 15, 16, 17, 24, 30]
# self.ref_index = [5, 6, 7, 8, 9, 10, 11, 12, 19, 20, 23, 25, 26, 27, 28, 29]
# self.nosym_obj_idx = [18, 21, 22]
self.rot_obj_idx = [1-1, 2-1, 3-1, 4-1, 13-1, 14-1, 15-1, 16-1, 17-1, 24-1, 30-1]
self.ref_obj_idx = [5-1, 6-1, 7-1, 8-1, 9-1, 10-1, 11-1, 12-1, 19-1, 20-1,
23-1, 25-1, 26-1, 27-1, 28-1, 29-1]
self.nosym_obj_idx = [18-1, 21-1, 22-1]
self.num_pt_mesh_small = 5000
self.num_pt_mesh_large = 1000
self.refine = refine
self.front_num = 2
self.img_noise = True
self.t_noise = True
# print(len(self.list))
def __getitem__(self, index):
if self.mode == 'train':
data_dir = self.list[index][:-7]
dir_num = self.list[index][-13:-7]
data_num = self.list[index][-6:]
info = self.info_list[int(dir_num) - 1]
gt = self.gt_list[int(dir_num) - 1]
cam = self.cam_list[int(dir_num) - 1]
label = Image.open(
'{0}/{1}/{2}/{3}_{4}.png'.format(self.root, data_dir, 'mask_visib_occ', data_num, str(0).zfill(6)))
img = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'rgb_occ', data_num))
depth = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'depth', data_num))
patch_file = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'segmentation', data_num))
normal_file = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'normal', data_num))
choose_file = '{0}/{1}/{2}/{3}_choose.list'.format(self.root, data_dir, 'segmentation', data_num)
choose_ls = []
stable_ls = []
try:
with open(choose_file) as f:
data = f.readlines()
if len(data) > 1:
for ids in data:
choose_id = ids[:-1].split(',')[:-1]
stable = float(ids[:-1].split(',')[-1])
choose_ls.append([int(x) for x in choose_id])
stable_ls.append(stable)
else:
if data[0] != '0':
choose_id = data[0].split(',')[:-1]
stable = float(data[0].split(',')[-1])
choose_ls.append([int(x) for x in choose_id])
stable_ls.append(stable)
else:
stable_ls.append(0)
            except OSError:
                print('choose_list file does not exist')
stable_ls.append(0)
choose_ls = []
data = ['0']
# if self.img_noise:
# img = self.trancolor(img)
patch_label = np.array(patch_file)
depth = np.array(depth)
mask_occ = np.array(label)
normal = np.array(normal_file)
cam_k = cam[str(int(data_num))]['cam_K']
depth_scale = cam[str(int(data_num))]['depth_scale']
cam_k = np.array(cam_k).reshape(3, 3)
obj_bb = info[str(int(data_num))][0]['bbox_visib']
obj_id = gt[str(int(data_num))][0]['obj_id']
model_info = self.model_info[str(obj_id)]
depth_mask = ma.getmaskarray(ma.masked_not_equal(depth, 0))
mask_label = ma.getmaskarray(ma.masked_not_equal(mask_occ, 0))
mask = mask_label * depth_mask
# mask_of_normal = mask.reshape(400, 400, 1).repeat(3, 2)
# normal_masked = normal*mask_of_normal
# mask_depth = mask * depth
# mask_patch = mask * patch_label
target_r = gt[str(int(data_num))][0]['cam_R_m2c']
target_r = np.array(target_r).reshape(3, 3).T
target_t = np.array(gt[str(int(data_num))][0]['cam_t_m2c'])
target_t = target_t / 1000
rt = np.append(target_r, target_t).reshape(1, 12)
add = np.array([[0, 0, 0, 1]])
target_trans = np.append(target_r.T, target_t.reshape(3, 1), axis=1)
target_trans = np.append(target_trans, add, axis=0)
rmin, rmax, cmin, cmax = get_bbox(mask_label)
img_masked = self.trans(img)[:, rmin:rmax, cmin:cmax]
img_masked = torch.zeros(img_masked.shape)
# img_masked1 = self.norm1(img_masked[:, rmin:rmax, cmin:cmax])
# img_masked2 = self.norm1(img_masked[:, rmin:rmax, cmin:cmax])
# img = np.array(img)
# img_masked_ = np.transpose(img[:, :, :3], (2, 0, 1))[:, rmin:rmax, cmin:cmax]
choose = mask[rmin:rmax, cmin:cmax].flatten().nonzero()[0]
if len(choose) > self.num_pt:
c_mask = np.zeros(len(choose), dtype=int)
c_mask[:self.num_pt] = 1
np.random.shuffle(c_mask)
choose = choose[c_mask.nonzero()]
else:
if len(choose) == 0:
                    print('warning: empty choose list at index', index)
choose = np.pad(choose, (0, self.num_pt - len(choose)), 'wrap')
normal_maskd = normal[rmin:rmax, cmin:cmax].reshape(-1,3)[choose][:,:, np.newaxis].astype(np.float32)
patch_masked = patch_label[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
depth_masked = depth[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
xmap_masked = self.xmap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
ymap_masked = self.ymap[rmin:rmax, cmin:cmax].flatten()[choose][:, np.newaxis].astype(np.float32)
choose = np.array([choose])
cam_cx = cam_k[0, 2]
cam_cy = cam_k[1, 2]
cam_fx = cam_k[0, 0]
cam_fy = cam_k[1, 1]
pt3 = patch_masked
            pt2 = depth_masked * depth_scale / 1000
pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
cloud = np.concatenate((pt0, pt1, pt2, pt3), axis=1)
nx = normal_maskd[:, 0] / 255.0 * 2 - 1
ny = normal_maskd[:, 1] / 255.0 * 2 - 1
nz = normal_maskd[:, 2] / 255.0 * 2 - 1
normals = np.concatenate((nx, ny, nz), axis=1)
dellist = [j for j in range(0, len(self.cld[obj_id]))]
if self.refine:
dellist = random.sample(dellist, len(self.cld[obj_id]) - self.num_pt_mesh_large)
else:
dellist = random.sample(dellist, len(self.cld[obj_id]) - self.num_pt_mesh_small)
model_points = np.delete(self.cld[obj_id], dellist, axis=0)
model_points = model_points / 1000
target = np.dot(model_points, target_r)
target = np.add(target, target_t)
# displayPoint(cloud, target, index)
# patch_occ = patch_label[rmin:rmax, cmin:cmax]
# num_patch = np.max(patch_occ)
# num_list = []
# for n in range(1, num_patch + 1):
# num = str(patch_occ.tolist()).count(str(n))
# num_list.append(num)
#
# num_list_new = []
# patch_id_list = []
# for m in num_list:
# if m > 100:
# num_list_new.append(m)
# patch_id_list.append(num_list.index(m) + 1)
im_id = 0
scene_id = 0
if self.mode == 'test':
data_dir = self.list[index][:23]
dir_num = self.list[index][16:22]
data_num = self.list[index][23:29]
obj_order = self.list[index][30:]
idx = int(obj_order)
im_id = int(data_num)
scene_id = int(dir_num)
info = self.info_list[int(dir_num) - 1]
gt = self.gt_list[int(dir_num) - 1]
cam = self.cam_list[int(dir_num) - 1]
obj_num = len(gt[str(int(data_num))])
obj_id = gt[str(int(data_num))][idx]['obj_id']
model_info = self.model_info[str(obj_id)]
depth = np.array(Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'depth', data_num)))
label = np.array(Image.open(
'{0}/{1}/{2}/{3}_{4}.png'.format(self.root, data_dir, 'mask_visib', data_num, str(idx).zfill(6))))
patch_label = np.array(Image.open(
'{0}/{1}/{2}/{3}_{4}.png'.format(self.root, data_dir, 'segmentation', data_num, str(idx).zfill(6))))
img = Image.open('{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'rgb', data_num.zfill(6)))
normal = np.array(Image.open(
'{0}/{1}/{2}/{3}.png'.format(self.root, data_dir, 'normal', data_num, str(idx).zfill(6))))
choose_file = '{0}/{1}/{2}/{3}_{4}_choose.list'.format(self.root, data_dir, 'segmentation', data_num, str(idx).zfill(6))
with open(choose_file) as f:
data = f.readlines()
choose_ls = []
stable_ls = []
if len(data) > 1:
for ids in data:
choose_id = ids[:-1].split(',')[:-1]
stable = float(ids[:-1].split(',')[-1])
choose_ls.append([int(x) for x in choose_id])
stable_ls.append(stable)
else:
if data[0] != '0':
choose_id = data[0].split(',')[:-1]
stable = float(data[0].split(',')[-1])
choose_ls.append([int(x) for x in choose_id])
stable_ls.append(stable)
else:
stable_ls.append(0)
depth_scale = cam[str(int(data_num))]['depth_scale']
cam_k = cam[str(int(data_num))]['cam_K']
cam_k = np.array(cam_k).reshape(3, 3)
cam_cx = cam_k[0, 2]
cam_cy = cam_k[1, 2]
cam_fx = cam_k[0, 0]
cam_fy = cam_k[1, 1]
obj_bb = info[str(int(data_num))][idx]['bbox_visib']
cmin = obj_bb[0]
cmax = cmin + obj_bb[2]
rmin = obj_bb[1]
rmax = rmin + obj_bb[3]
img_masked = self.trans(img)[:, rmin:rmax, cmin:cmax]
img_masked = torch.zeros(img_masked.shape)
# img_masked = np.transpose(np.array(img)[:, :, :3], (2, 0, 1))[:, rmin:rmax, cmin:cmax]
mask_depth = ma.getmaskarray(ma.masked_not_equal(depth, 0))
mask_label = ma.getmaskarray(ma.masked_equal(label, 255))
mask = mask_label * mask_depth
mask_patch = mask * patch_label
mask_num = len(mask.flatten().nonzero()[0])
target_r = gt[str(int(data_num))][idx]['cam_R_m2c']
            target_r = np.array(target_r).reshape(3, 3).T
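            # --- hedged completion (not in the original, which is truncated here): ---
            # --- a sketch mirroring the training branch above. ---
            target_t = np.array(gt[str(int(data_num))][idx]['cam_t_m2c']) / 1000
            rt = np.append(target_r, target_t).reshape(1, 12)
            add = np.array([[0, 0, 0, 1]])
            target_trans = np.append(target_r.T, target_t.reshape(3, 1), axis=1)
            target_trans = np.append(target_trans, add, axis=0)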
import os
import time
import imgaug as ia
import numpy as np
from PIL import Image
from imgaug import augmenters as iaa
from pycocotools import mask as cocomask
def from_pil(*images):
    images = [np.array(image) for image in images]
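    # --- hedged completion (not in the original, which is truncated here): ---
    # --- a common convention is to unwrap single-image calls. ---
    if len(images) == 1:
        return images[0]
    return images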
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Nov 27 2019
@author: changyuchang
"""
import numpy as np
from functools import partial
def no_selection(community_function):
"""
Direct well-to-well transfer without selection
"""
n_wells = len(community_function)
return np.eye(n_wells)
# Make selection algorithms with similar names, using partial functions
## Select top n%
def temp_select_top(community_function, p):
n_wells = len(community_function)
sorted_community_function = np.sort(community_function)
cut_off = sorted_community_function[int(np.floor(len(community_function)*(1-p)))]
winner_index = np.where(community_function >= cut_off)[0][::-1]
transfer_matrix = np.zeros((n_wells,n_wells))
t_new = range(n_wells) # New wells
t_old = list(winner_index) * (int(np.ceil(1/p) + 1)) # Old wells
for i in range(n_wells):
transfer_matrix[t_new[i], t_old[i]] = 1
return transfer_matrix
for i in [10, 15, 16, 20, 25, 28, 30, 33, 40, 50, 60]:
globals()['select_top%spercent' %i] = partial(temp_select_top, p = i/100)
## Select top n% control
def temp_select_top_control(community_function, p):
n_wells = len(community_function)
randomized_community_function = community_function.copy()
np.random.shuffle(randomized_community_function)
sorted_community_function = np.sort(randomized_community_function)
cut_off = sorted_community_function[int(np.floor(len(randomized_community_function)*(1-p)))]
winner_index = np.where(randomized_community_function >= cut_off)[0][::-1]
transfer_matrix = np.zeros((n_wells,n_wells))
t_new = range(n_wells) # New wells
t_old = list(winner_index) * (int(np.ceil(1/p)+1)) # Old wells
for i in range(n_wells):
transfer_matrix[t_new[i], t_old[i]] = 1
return transfer_matrix
for i in [10, 15, 16, 20, 25, 28, 30, 33, 40, 50, 60]:
globals()['select_top%spercent_control' %i] = partial(temp_select_top_control, p = i/100)
## Pooling
def temp_pool_top(community_function, p):
n_wells = len(community_function)
sorted_community_function = np.sort(community_function)
cut_off = sorted_community_function[int(np.floor(len(community_function)*(1-p)))]
winner_index = np.where(community_function >= cut_off)[0][::-1]
transfer_matrix = np.zeros((n_wells,n_wells))
transfer_matrix[:, list(winner_index)] = 1
return transfer_matrix
for i in [10, 15, 16, 20, 25, 28, 30, 33, 40, 50, 60]:
globals()['pool_top%spercent' %i] = partial(temp_pool_top, p = i/100)
## Pooling control
def temp_pool_top_control(community_function, p):
n_wells = len(community_function)
randomized_community_function = community_function.copy()
np.random.shuffle(randomized_community_function)
    sorted_community_function = np.sort(randomized_community_function)
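    # --- hedged completion (not in the original, which is truncated here): ---
    # --- a sketch mirroring temp_pool_top above, applied to the shuffled ---
    # --- function, plus the same partial-function binding loop. ---
    cut_off = sorted_community_function[int(np.floor(len(randomized_community_function)*(1-p)))]
    winner_index = np.where(randomized_community_function >= cut_off)[0][::-1]
    transfer_matrix = np.zeros((n_wells, n_wells))
    transfer_matrix[:, list(winner_index)] = 1
    return transfer_matrix
for i in [10, 15, 16, 20, 25, 28, 30, 33, 40, 50, 60]:
    globals()['pool_top%spercent_control' % i] = partial(temp_pool_top_control, p = i/100)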
"""The lattice module define the class to handle 3D crystal lattices (the 14 Bravais lattices).
"""
import os
from pymicro.external import CifFile_module as CifFile
import enum
import functools
import math
import numpy as np
from numpy import pi, dot, transpose, radians
from matplotlib import pyplot as plt
class Crystal:
'''
The Crystal class to create any particular crystal structure.
A crystal instance is composed by:
* one of the 14 Bravais lattice
* a point basis (or motif)
'''
def __init__(self, lattice, basis=None, basis_labels=None, basis_sizes=None, basis_colors=None):
'''
Create a Crystal instance with the given lattice and basis.
This create a new instance of a Crystal object. The given lattice
is assigned to the crystal. If the basis is not specified, it will
be one atom at (0., 0., 0.).
:param lattice: the :py:class:`~pymicro.crystal.lattice.Lattice` instance of the crystal.
:param list basis: A list of tuples containing the position of the atoms in the motif.
:param list basis_labels: A list of strings containing the description of the atoms in the motif.
        :param list basis_sizes: A list of float between 0. and 1. (default 0.1) to scale the atoms in the motif.
:param list basis_colors: A list of vtk colors of the atoms in the motif.
'''
self._lattice = lattice
        if basis is None:
# default to one atom at (0, 0, 0)
self._basis = [(0., 0., 0.)]
self._labels = ['?']
self._sizes = [0.1]
self._colors = [(0., 0., 1.)]
else:
self._basis = basis
self._labels = basis_labels
self._sizes = basis_sizes
self._colors = basis_colors
class CrystallinePhase:
def __init__(self, phase_id=1, name='unknown', lattice=None):
"""Create a new crystalline phase.
The `phase_id` attribute is used to identify the phase in data sets
where it can be referred to in phase_map for instance."""
self.phase_id = phase_id
self.name = name
self.description = ''
self.formula = ''
if lattice is None:
lattice = Lattice.cubic(1.0)
self.set_lattice(lattice)
# a list of C_IJ values
self.elastic_constants = []
def __repr__(self):
"""Generate a string representation of this instance."""
out = 'Phase %d (%s) \n\t-- ' % (self.phase_id, self.name)
out += self.get_lattice().__repr__()
if self.elastic_constants:
out += '\n\t-- elastic constants: %s' % self.elastic_constants
return out
def get_lattice(self):
"""Returns the crystal lattice."""
return self._lattice
def set_lattice(self, lattice):
"""Set the crystal lattice.
:param Lattice lattice: the crystal lattice.
"""
self._lattice = lattice
def get_symmetry(self):
"""Returns the type of `Symmetry` of the Lattice."""
return self.get_lattice().get_symmetry()
def to_dict(self):
d = {'phase_id': self.phase_id,
'name': self.name,
'description': self.description,
'formula': self.formula,
'symmetry': self.get_symmetry().to_string(),
'lattice_parameters': self.get_lattice().get_lattice_parameters(),
'lattice_parameters_unit': 'nm',
'elastic_constants': self.elastic_constants,
'elastic_constants_unit': 'MPa'
}
#print(d)
return d
@staticmethod
def from_dict(d):
sym = Symmetry.from_string(d['symmetry'])
lattice = Lattice.from_symmetry(sym, d['lattice_parameters'])
phase = CrystallinePhase(d['phase_id'], d['name'], lattice)
phase.description = d['description']
phase.formula = d['formula']
phase.elastic_constants = d['elastic_constants']
return phase
class Symmetry(enum.Enum):
"""
Class to describe crystal symmetry defined by its Laue class symbol.
"""
cubic = 'm3m'
hexagonal = '6/mmm'
orthorhombic = 'mmm'
tetragonal = '4/mmm'
trigonal = 'bar3m'
monoclinic = '2/m'
triclinic = 'bar1'
@staticmethod
def from_string(s):
if s == 'cubic':
return Symmetry.cubic
elif s == 'hexagonal':
return Symmetry.hexagonal
elif s == 'orthorhombic':
return Symmetry.orthorhombic
elif s == 'tetragonal':
return Symmetry.tetragonal
elif s == 'trigonal':
return Symmetry.trigonal
elif s == 'monoclinic':
return Symmetry.monoclinic
elif s == 'triclinic':
return Symmetry.triclinic
else:
return None
def to_string(self):
if self is Symmetry.cubic:
return 'cubic'
elif self is Symmetry.hexagonal:
return 'hexagonal'
elif self is Symmetry.orthorhombic:
return 'orthorhombic'
elif self is Symmetry.tetragonal:
return 'tetragonal'
elif self is Symmetry.trigonal:
return 'trigonal'
elif self is Symmetry.monoclinic:
return 'monoclinic'
elif self is Symmetry.triclinic:
return 'triclinic'
else:
return None
@staticmethod
def from_space_group(space_group_number):
"""Create an instance of the `Symmetry` class from a TSL symmetry
number.
:raise ValueError: if the space_group_number is not between 1 and 230.
:param int space_group_number: the number asociated with the
space group (between 1 and 230).
:return: an instance of the `Symmetry` class
"""
if space_group_number < 1 or space_group_number > 230:
raise ValueError('space_group_number must be between 1 and 230')
return None
if space_group_number <= 2:
return Symmetry.triclinic
elif space_group_number <= 15:
return Symmetry.monoclinic
elif space_group_number <= 74:
return Symmetry.orthorhombic
elif space_group_number <= 142:
return Symmetry.tetragonal
elif space_group_number <= 167:
return Symmetry.trigonal
elif space_group_number <= 194:
return Symmetry.hexagonal
else:
return Symmetry.cubic
@staticmethod
def from_tsl(tsl_number):
"""Create an instance of the `Symmetry` class from a TSL symmetry
number.
:return: an instance of the `Symmetry` class
"""
if tsl_number == 43:
return Symmetry.cubic
elif tsl_number == 62:
return Symmetry.hexagonal
elif tsl_number == 22:
return Symmetry.orthorhombic
elif tsl_number == 42:
return Symmetry.tetragonal
elif tsl_number == 32:
return Symmetry.trigonal
elif tsl_number == 2:
return Symmetry.monoclinic
elif tsl_number == 1:
return Symmetry.triclinic
else:
return None
def symmetry_operators(self, use_miller_bravais=False):
"""Define the equivalent crystal symmetries.
        Those come from Randle & Engler, 2000. In the cubic
        crystal structure, for instance, there are 24 equivalent cube orientations.
:returns array: A numpy array of shape (n, 3, 3) where n is the \
number of symmetries of the given crystal structure.
"""
if self is Symmetry.cubic:
            sym = np.zeros((24, 3, 3), dtype=float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[0., 0., -1.], [0., -1., 0.], [-1., 0., 0.]])
sym[2] = np.array([[0., 0., -1.], [0., 1., 0.], [1., 0., 0.]])
sym[3] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[4] = np.array([[0., 0., 1.], [0., 1., 0.], [-1., 0., 0.]])
sym[5] = np.array([[1., 0., 0.], [0., 0., -1.], [0., 1., 0.]])
sym[6] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[7] = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]])
sym[8] = np.array([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
sym[9] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
sym[10] = np.array([[0., 1., 0.], [-1., 0., 0.], [0., 0., 1.]])
sym[11] = np.array([[0., 0., 1.], [1., 0., 0.], [0., 1., 0.]])
sym[12] = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
sym[13] = np.array([[0., 0., -1.], [-1., 0., 0.], [0., 1., 0.]])
sym[14] = np.array([[0., -1., 0.], [0., 0., 1.], [-1., 0., 0.]])
sym[15] = np.array([[0., 1., 0.], [0., 0., -1.], [-1., 0., 0.]])
sym[16] = np.array([[0., 0., -1.], [1., 0., 0.], [0., -1., 0.]])
sym[17] = np.array([[0., 0., 1.], [-1., 0., 0.], [0., -1., 0.]])
sym[18] = np.array([[0., -1., 0.], [0., 0., -1.], [1., 0., 0.]])
sym[19] = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1.]])
sym[20] = np.array([[-1., 0., 0.], [0., 0., 1.], [0., 1., 0.]])
sym[21] = np.array([[0., 0., 1.], [0., -1., 0.], [1., 0., 0.]])
sym[22] = np.array([[0., -1., 0.], [-1., 0., 0.], [0., 0., -1.]])
sym[23] = np.array([[-1., 0., 0.], [0., 0., -1.], [0., -1., 0.]])
elif self is Symmetry.hexagonal:
if use_miller_bravais:
# using the Miller-Bravais representation here
                sym = np.zeros((12, 4, 4), dtype=int)
sym[0] = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
sym[1] = np.array([[0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
sym[2] = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, 1]])
sym[3] = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
sym[4] = np.array([[0, 0, 1, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1]])
sym[5] = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 0, -1]])
sym[6] = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
sym[7] = np.array([[0, 0, -1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, 1]])
sym[8] = np.array([[0, -1, 0, 0], [0, 0, -1, 0], [-1, 0, 0, 0], [0, 0, 0, 1]])
sym[9] = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]])
sym[10] = np.array([[0, 0, -1, 0], [-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 0, -1]])
sym[11] = np.array([[0, -1, 0, 0], [0, 0, -1, 0], [-1, 0, 0, 0], [0, 0, 0, -1]])
else:
                sym = np.zeros((12, 3, 3), dtype=float)
s60 = np.sin(60 * np.pi / 180)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[0.5, s60, 0.], [-s60, 0.5, 0.], [0., 0., 1.]])
sym[2] = np.array([[-0.5, s60, 0.], [-s60, -0.5, 0.], [0., 0., 1.]])
sym[3] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
sym[4] = np.array([[-0.5, -s60, 0.], [s60, -0.5, 0.], [0., 0., 1.]])
sym[5] = np.array([[0.5, -s60, 0.], [s60, 0.5, 0.], [0., 0., 1.]])
sym[6] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
sym[7] = np.array([[0.5, s60, 0.], [s60, -0.5, 0.], [0., 0., -1.]])
sym[8] = np.array([[-0.5, s60, 0.], [s60, 0.5, 0.], [0., 0., -1.]])
sym[9] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
sym[10] = np.array([[-0.5, -s60, 0.], [-s60, 0.5, 0.], [0., 0., -1.]])
sym[11] = np.array([[0.5, -s60, 0.], [-s60, -0.5, 0.], [0., 0., -1.]])
elif self is Symmetry.orthorhombic:
            sym = np.zeros((4, 3, 3), dtype=float)
sym[0] = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
sym[1] = np.array([[1., 0., 0.], [0., -1., 0.], [0., 0., -1.]])
            sym[2] = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
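            # --- hedged completion (not in the original, which is truncated here): ---
            # --- the remaining mmm two-fold rotation; branches for the other ---
            # --- symmetries are omitted in this fragment. ---
            sym[3] = np.array([[-1., 0., 0.], [0., -1., 0.], [0., 0., 1.]])
        return sym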
"""
runs using
1) python/3.3.2 2) numpy/1.11.1/python/3.3 3) scipy/0.17.1/python/3.3
module load python3/3.3.2 /scratch/mhess/py3p3/3.3
Aim: do MOR steady state solver with efficient matrices
"""
import numpy as np
import time
from ITHACA_SEM_code import ROM_Oseen_Iteration_step
from ITHACA_SEM_code import Generate_Advection_Terms
def remove_from_matrix(matrix, columns, rows):
return [
[float(matrix[row_num][col_num])
for col_num in range(len(matrix[row_num]))
if not col_num in columns]
for row_num in range(len(matrix))
if not row_num in rows]
#(Dbnd, Dint, sing_A, sing_B, sing_Btilde, sing_C, M_trafo_no_pp_incl_dbc) = gen_Mats_cav.gen()
def Oseen_Iteration(mu_vec, Dbnd, Dint, sing_A, sing_B, sing_Btilde, sing_C, M_trafo_no_pp_incl_dbc, glodofphys, LocGloMapMatA, LocGloSign, LocGloMap, snap, ngc, nlc, bmap, imap, num_elem, nsize_bndry, nsize_int, nsize_p, nvel, c_f_bnd, c_f_p, c_f_int, NumDirBCs, nGlobHomBndDofs, no_loc_dbc, elem_loc_dbc, no_not_loc_dbc, elem_not_loc_dbc, IP, IPp, bwdtrans, cartmap0, cartmap1, LocGloBndSign, LocGloBndMap, BndCondCoeffsToGlobalCoeffsMap , init_bnd, forcing):
print("snap.shape ", snap.shape)
snap_x = snap[:, 0:ngc]
snap_y = snap[:, ngc:2*ngc]
print("snap_x.shape ", snap_x.shape)
print("snap_y.shape ", snap_y.shape)
snap_x_mean = np.mean(snap_x, axis=0)
snap_y_mean = np.mean(snap_y, axis=0)
nbmap = bmap.shape[0]
nimap = imap.shape[0]
nbnd = nbmap
nint = nimap
nsize_bndry_p1 = nsize_bndry + 1
nsize_p_m1 = nsize_p - 1
u_x = snap_x_mean
u_y = snap_y_mean
supr_f_bnd = np.dot( np.transpose(Dbnd) , c_f_p )
supr_f_int = np.dot( np.transpose(Dint) , c_f_p )
c_f_all = np.r_[c_f_bnd, c_f_p, c_f_int]
(c_f_all, R1, R2) = np.linalg.svd(c_f_all, full_matrices=False)
c_f_all = c_f_all[:, 0:np.max(np.where(np.cumsum(R1)/np.sum(R1) < .99))+1]
c_f_bnd = c_f_all[0:c_f_bnd.shape[0],:]
c_f_p = c_f_all[c_f_bnd.shape[0]:c_f_bnd.shape[0]+c_f_p.shape[0],:]
c_f_int = c_f_all[c_f_bnd.shape[0]+c_f_p.shape[0]:c_f_bnd.shape[0]+c_f_p.shape[0]+c_f_int.shape[0],:]
print("c_f_bnd.shape ", c_f_bnd.shape)
print("c_f_p.shape ", c_f_p.shape)
print("c_f_int.shape ", c_f_int.shape)
nBndDofs = nGlobHomBndDofs + NumDirBCs
nGlobBndDofs = nBndDofs
no_of_dbc_in_loc = no_loc_dbc
loc_dbc_set = elem_loc_dbc
RBsize = c_f_bnd.shape[1]
V_f_bnd = np.zeros([num_elem*nsize_bndry - no_of_dbc_in_loc, c_f_bnd.shape[1]])
cropped_counter = 0
for i in range(0, num_elem*nsize_bndry):
if i not in loc_dbc_set:
V_f_bnd[cropped_counter,:] = c_f_bnd[i,:]
cropped_counter = cropped_counter + 1
V_f_p = c_f_p
V_f_int = c_f_int
f_bnd_dbc = 0*np.arange((num_elem*nsize_bndry - no_not_loc_dbc)*1.0)
non_cropped_counter = 0
for i in range(0,num_elem*nsize_bndry):
if i in loc_dbc_set:
f_bnd_dbc[non_cropped_counter] = c_f_bnd[i,0]
non_cropped_counter += 1
t = time.time()
sing_A_M = np.dot(np.transpose(M_trafo_no_pp_incl_dbc),sing_A)
sing_A_MM = np.dot(M_trafo_no_pp_incl_dbc,sing_A_M)
Dbnd_M = np.dot(Dbnd, M_trafo_no_pp_incl_dbc)
Dbnd_MM = np.dot(Dbnd_M, np.transpose(M_trafo_no_pp_incl_dbc))
sing_B_M = np.dot(np.transpose(M_trafo_no_pp_incl_dbc),sing_B)
sing_B_MM = np.dot(M_trafo_no_pp_incl_dbc,sing_B_M)
sing_A_MM_cropped = np.array(remove_from_matrix(sing_A_MM, loc_dbc_set, loc_dbc_set))
sing_A_MM_Aud = np.array(remove_from_matrix(sing_A_MM, elem_not_loc_dbc, loc_dbc_set))
Dbnd_crop = np.array(remove_from_matrix(Dbnd, loc_dbc_set, set()))
sing_Dbnd_Aud = np.array(remove_from_matrix(Dbnd, elem_not_loc_dbc, set()))
sing_B_MM_cropped = np.array(remove_from_matrix(sing_B_MM, set(), loc_dbc_set))
Dbnd_MM_crop = np.array(remove_from_matrix(Dbnd_MM, loc_dbc_set, set()))
sing_Btilde_cropped = np.array(remove_from_matrix(sing_Btilde, set(), loc_dbc_set))
sing_Btilde_Aud = np.array(remove_from_matrix(sing_Btilde, set(), elem_not_loc_dbc))
A_11_1 = np.dot(np.dot(np.transpose(V_f_bnd), sing_A_MM_cropped), V_f_bnd)
A_21_alt = -np.dot(np.dot(np.transpose(V_f_p), Dbnd_crop), V_f_bnd)
A_31_1 = np.dot(np.dot(np.transpose(V_f_int), np.transpose(sing_Btilde_cropped)), V_f_bnd)
A_12_alt = -np.dot(np.dot(np.transpose(V_f_bnd), np.transpose(Dbnd_MM_crop)), V_f_p)
A_32_alt = -np.dot(np.dot(np.transpose(V_f_int), np.transpose(Dint)), V_f_p)
A_13_1 = np.dot(np.dot(np.transpose(V_f_bnd), sing_B_MM_cropped), V_f_int)
A_23_alt = np.transpose(A_32_alt)
A_33_1 = np.dot(np.dot(np.transpose(V_f_int), sing_C), V_f_int)
r1_1 = np.dot(np.dot(np.transpose(V_f_bnd), sing_A_MM_Aud), f_bnd_dbc)
r2_1 = -np.dot(np.dot(np.transpose(V_f_p), sing_Dbnd_Aud), f_bnd_dbc)
r3_1 = np.dot(np.dot(np.transpose(V_f_int), np.transpose(sing_Btilde_Aud)), f_bnd_dbc)
Dbnd_M = np.dot(Dbnd, M_trafo_no_pp_incl_dbc)
Dbnd_MM = np.dot(Dbnd_M, np.transpose(M_trafo_no_pp_incl_dbc))
Dbnd_crop = np.array(remove_from_matrix(Dbnd, loc_dbc_set, set()))
sing_Dbnd_Aud = np.array(remove_from_matrix(Dbnd, elem_not_loc_dbc, set()))
Dbnd_MM_crop = np.array(remove_from_matrix(Dbnd_MM, loc_dbc_set, set()))
row1_2 = -np.dot(np.transpose(c_f_bnd), np.dot(np.transpose(Dbnd), c_f_p))
row2_1 = -np.dot(np.transpose(c_f_p), np.dot(Dbnd, c_f_bnd))
row2_3 = -np.dot(np.transpose(c_f_p), np.dot(Dint, c_f_int))
row3_2 = -np.dot(np.transpose(c_f_int), np.dot(np.transpose(Dint), c_f_p))
sing_A_pt1_proj = np.dot(np.dot(np.transpose(c_f_bnd), sing_A), c_f_bnd)
sing_B_pt1_proj = np.dot(np.dot(np.transpose(c_f_bnd), sing_B), c_f_int)
sing_Btilde_pt1_proj = np.dot(np.dot(np.transpose(c_f_int), np.transpose(sing_Btilde)), c_f_bnd)
sing_C_pt1_proj = np.dot(np.dot(np.transpose(c_f_int), sing_C), c_f_int)
elapsed = time.time() - t
print('time MatMults: ', elapsed)
t = time.time()
sing_A_MM_cropped_proj_x = np.zeros([RBsize, RBsize, RBsize])
sing_B_MM_cropped_proj_x = np.zeros([RBsize, RBsize, RBsize])
sing_A_MM_Aud_proj_x = np.zeros([RBsize, RBsize])
sing_A_MM_cropped_proj_y = np.zeros([RBsize, RBsize, RBsize])
sing_B_MM_cropped_proj_y = np.zeros([RBsize, RBsize, RBsize])
sing_A_MM_Aud_proj_y = np.zeros([RBsize, RBsize])
sing_Btilde_cropped_proj_x = np.zeros([RBsize, RBsize, RBsize])
sing_Btilde_cropped_proj_y = np.zeros([RBsize, RBsize, RBsize])
sing_Btilde_Aud_proj_x = np.zeros([RBsize, RBsize])
sing_Btilde_Aud_proj_y = np.zeros([RBsize, RBsize])
sing_C_proj_x = np.zeros([RBsize, RBsize, RBsize])
sing_C_proj_y = np.zeros([RBsize, RBsize, RBsize])
sing_A_proj_x = np.zeros([RBsize, RBsize, RBsize])
sing_A_proj_y = np.zeros([RBsize, RBsize, RBsize])
sing_B_proj_x = np.zeros([RBsize, RBsize, RBsize])
sing_B_proj_y = np.zeros([RBsize, RBsize, RBsize])
sing_Btilde_proj_x = np.zeros([RBsize, RBsize, RBsize])
sing_Btilde_proj_y = np.zeros([RBsize, RBsize, RBsize])
nphys = glodofphys.shape[1]
phys_basis_x = np.zeros([nphys,RBsize])
phys_basis_y = np.zeros([nphys,RBsize])
# use unit inverse tt_cd mapping
# inv_tt_cd = 0*tt_cd[0:ngc]
inv_tt_cd = np.arange(ngc)
for i in range(0,ngc):
inv_tt_cd[i] = int(i)
#inv_tt_cd[tt_cd[i]] = int(i)
# compute for all basis functions, the gen_Mats_adv
nplanecoeffs = nlc
for curr_basis in range(0, c_f_bnd.shape[1]):
cnt = 0
cnt1 = 0
offset = 0
fields = np.zeros( [nvel, nplanecoeffs] )
for curr_elem in range(0, num_elem):
for j in range(0, nvel):
for k in range(0, nbnd):
fields[j, offset + int(bmap[k])] = c_f_bnd[cnt + k, curr_basis]
for k in range(0, nint):
fields[j, offset + int(imap[k])] = c_f_int[cnt1+k, curr_basis]
cnt += nbnd
cnt1 += nint
offset += nbnd + nint # is 169
nvel = 2
cnt = 0
offset = 0
velo0 = np.zeros( [ngc] )
velo1 = np.zeros( [ngc] )
for i in range(0, nplanecoeffs):
velo0 = np.dot(LocGloMapMatA, np.transpose(fields[0,:]))
velo1 = np.dot(LocGloMapMatA, np.transpose(fields[1,:]))
# velo0[int(LocGloMapA[i])] = int(LocGloSignA[i]) * fields[0,i]
# velo1[int(LocGloMapA[i])] = int(LocGloSignA[i]) * fields[1,i] # use here inv_tt_cd ??
velo0 = velo0[inv_tt_cd[0:ngc]]
velo1 = velo1[inv_tt_cd[0:ngc]]
basis_x = np.dot(velo0, glodofphys)
basis_y = np.dot(velo1, glodofphys)
phys_basis_x[:,curr_basis] = basis_x
phys_basis_y[:,curr_basis] = basis_y
(phys_basis_x, R1, R2) = np.linalg.svd(phys_basis_x, full_matrices=False)
    (phys_basis_y, R1, R2) = np.linalg.svd(phys_basis_y, full_matrices=False)
import pytest
import os
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from sklearn.model_selection import cross_val_score
from sklearn.datasets import make_classification
from sklearn.exceptions import NotFittedError
from mcalf.models import ModelBase, IBIS8542Model, FitResults
from mcalf.profiles.voigt import voigt, double_voigt
from ..helpers import data_path_function, figure_test
data_path = data_path_function('models')
class DummyClassifier:
def __init__(self, trained=False, n_features=None, classifications=None):
self.trained = trained
self.n_features = n_features
        self.defined_return = classifications is not None
self.classifications = classifications
def train(self, X, y):
assert np.ndim(X) == 2 # (n_samples, n_features)
assert np.ndim(y) == 1 # (n_samples)
assert len(X) == len(y)
self.trained = True
self.n_features = X.shape[-1]
def test(self, X, y):
if not self.trained:
raise NotFittedError()
assert np.ndim(X) == 2 # (n_samples, n_features)
assert np.ndim(y) == 1 # (n_samples)
assert X.shape[-1] == self.n_features
assert len(X) == len(y)
def predict(self, X):
if self.defined_return:
return self.classifications.flatten()
if not self.trained:
raise NotFittedError()
assert np.ndim(X) == 2 # (n_samples, n_features)
assert X.shape[-1] == self.n_features
return np.asarray(X[:, -1] * 100, dtype=int)
@pytest.mark.parametrize('model', (ModelBase, IBIS8542Model))
def test_default_parameters(model):
# Test default parameters
with pytest.raises(ValueError) as e:
model()
assert 'original_wavelengths' in str(e.value)
def test_ibis8542model_basic():
# Will break if default parameters are changes in mcalf.models.IBIS8542Model
wl = 1000.97
x_orig = np.linspace(999.81, 1002.13, num=25)
prefilter_main = 1 - np.abs(x_orig - wl) * 0.1
prefilter_wvscl = x_orig - wl
m = IBIS8542Model(stationary_line_core=wl, original_wavelengths=x_orig,
prefilter_ref_main=prefilter_main, prefilter_ref_wvscl=prefilter_wvscl)
bg = 1327.243
arr = voigt(x_orig, -231.42, wl+0.05, 0.2, 0.21, bg)
m.load_array(np.array([arr, arr]), names=['row', 'wavelength'])
m.load_background(np.array([bg, bg]), names=['row'])
# Fit with assumed neural network classification
fit1, fit2 = m.fit(row=[0, 1], classifications=np.array([0, 0]))
fit3 = m.fit_spectrum(arr, classifications=0, background=bg) # Make sure this is equiv
truth = [-215.08275199, 1001.01035476, 0.00477063, 0.28869302]
assert np.array_equal(fit1.parameters, fit2.parameters)
assert np.array_equal(fit1.parameters, fit3.parameters)
assert list(fit1.parameters) == pytest.approx(truth)
def test_ibis8542model_configfile():
# Enter the data directory (needed to find the files referenced in the config files)
original_dir = os.getcwd()
os.chdir(data_path())
# Test with config file
IBIS8542Model(config="ibis8542model_config.yml")
# Turn off sigma
IBIS8542Model(config="ibis8542model_config.yml", sigma=False)
# Test with defined prefilter
with pytest.deprecated_call():
IBIS8542Model(config="ibis8542model_config_prefilter.yml")
# Test with no prefilter
IBIS8542Model(config="ibis8542model_config_noprefilter.yml")
# TODO Check that the parameters were imported correctly
# Go back to original directory
os.chdir(original_dir)
@pytest.fixture
def valid_kwargs():
stationary_line_core = 8542.099145376844
defaults = {
'stationary_line_core': stationary_line_core,
'original_wavelengths': np.linspace(stationary_line_core - 2, stationary_line_core + 2, num=25),
'constant_wavelengths': np.linspace(stationary_line_core - 1.7, stationary_line_core + 1.7, num=30),
'prefilter_response': np.loadtxt(data_path('ibis8542model_prefilter.csv'), delimiter=','),
}
return defaults
@pytest.mark.parametrize('model', (ModelBase, IBIS8542Model))
def test_validate_parameters(model, valid_kwargs):
# Default parameters should work
model(**valid_kwargs)
# original_wavelengths or constant_wavelengths not sorted
for wavelengths in ('original_wavelengths', 'constant_wavelengths'):
with pytest.raises(ValueError) as e:
defaults_mod = valid_kwargs.copy()
unsorted = defaults_mod[wavelengths].copy() # Copy the numpy array before shuffling
unsorted[10] = unsorted[12] # Copy the 12th element to the 10th (now out of order)
defaults_mod[wavelengths] = unsorted # Replace the sorted array with the unsorted
model(**defaults_mod) # Initialise the model
assert wavelengths in str(e.value) and 'must be sorted ascending' in str(e.value)
# assert warning when constant_wavelengths extrapolates original_wavelengths (1e-5 over below and above)
delta_lambda = 0.05
for match, i, delta in [
("Upper bound of `constant_wavelengths` is outside of `original_wavelengths` range.", -1, delta_lambda + 1e-5),
("Lower bound of `constant_wavelengths` is outside of `original_wavelengths` range.", 0, -1e-5)
]:
with pytest.warns(Warning, match=match):
defaults_mod = valid_kwargs.copy()
extrapolate = defaults_mod['constant_wavelengths'].copy()
extrapolate[i] = defaults_mod['original_wavelengths'][i] + delta
defaults_mod['constant_wavelengths'] = extrapolate
model(**defaults_mod, delta_lambda=delta_lambda) # Initialise the model
# stationary_line_core is not a float
with pytest.raises(ValueError) as e:
defaults_mod = valid_kwargs.copy()
defaults_mod.update({'stationary_line_core': int(8542)})
model(**defaults_mod)
assert 'stationary_line_core' in str(e.value) and 'float' in str(e.value)
# stationary_line_core out of wavelength range
with pytest.raises(ValueError) as e:
defaults_mod = valid_kwargs.copy()
defaults_mod.update({'stationary_line_core': float(1000)})
model(**defaults_mod)
assert 'stationary_line_core' in str(e.value) and 'is not within' in str(e.value)
# Check that length of prefilter response is enforced
with pytest.raises(ValueError) as e:
defaults_mod = valid_kwargs.copy()
defaults_mod.update({'prefilter_response': valid_kwargs['prefilter_response'][:-1]})
m = model(**defaults_mod)
m._set_prefilter()
assert 'prefilter_response' in str(e.value) and 'must be the same length' in str(e.value)
# Check that unexpected kwargs raise an error
with pytest.raises(TypeError) as e:
model(**valid_kwargs, qheysnfebsy=None)
assert 'got an unexpected keyword argument' in str(e.value) and 'qheysnfebsy' in str(e.value)
# TODO Verify remaining conditions
@pytest.mark.parametrize('model', (ModelBase, IBIS8542Model))
def test_load_data(model, valid_kwargs):
# Initialise model
m = model(**valid_kwargs)
# Unknown target
array = np.random.rand(5, 10, 15, 30)
with pytest.raises(ValueError) as e:
m._load_data(array, names=['time', 'row', 'column', 'wavelength'], target='arrrrray')
assert 'array target must be' in str(e.value) and 'arrrrray' in str(e.value)
# Ensure a single spectrum array cannot be loaded (not supported)
for target, names, array in [('array', ['wavelength'], np.random.rand(30)), ('background', [], 723.23)]:
with pytest.raises(ValueError) as e:
m._load_data(array, names=names, target=target)
assert 'cannot load an array containing one spectrum' in str(e.value)
# Ensure dimension names are validated
for meth in (m.load_array, m.load_background):
array = np.random.rand(5, 10, 15, 30)
with pytest.raises(ValueError) as e:
meth(array)
assert 'dimension names must be specified' in str(e.value)
array = np.random.rand(5, 10, 15, 30)
with pytest.raises(ValueError) as e:
meth(array, names=['row', 'column', 'wavelength'])
assert 'number of dimension names do not match number of columns' in str(e.value)
array = np.random.rand(5, 10, 15, 30)
with pytest.raises(ValueError) as e:
meth(array, names=['wavelength', 'row', 'column', 'wavelength'])
assert 'duplicate dimension names found' in str(e.value)
array = np.random.rand(5, 10, 15)
with pytest.raises(ValueError) as e:
m.load_array(array, names=['time', 'row', 'column'])
assert 'array must contain a wavelength dimension' in str(e.value)
# 'wavelengths' not valid background dimension
array = np.random.rand(5, 10, 15)
with pytest.raises(ValueError) as e:
m.load_background(array, names=['row', 'column', 'wavelength'])
assert "name 'wavelength' is not a valid dimension name" in str(e.value)
# 'rows' should be 'row' etc.
array = np.random.rand(5, 10, 15, 30)
with pytest.raises(ValueError) as e:
m.load_array(array, names=['time', 'rows', 'column', 'wavelength'])
assert "name 'rows' is not a valid dimension name" in str(e.value)
array = np.random.rand(5, 10, 15)
with pytest.raises(ValueError) as e:
m.load_background(array, names=['time', 'rows', 'column'])
assert "name 'rows' is not a valid dimension name" in str(e.value)
array = np.random.rand(5, 10, 15, 30)
with pytest.raises(ValueError) as e:
m.load_array(array, names=['time', 'row', 'column', 'wavelength'])
assert 'length of wavelength dimension not equal length of original_wavelengths' in str(e.value)
array = np.random.rand(5, 10, 15)
m.load_background(array, names=['time', 'row', 'column'])
array = np.random.rand(5, 7, 15, 25)
with pytest.warns(UserWarning, match="shape of spectrum array indices does not match shape of background array"):
m.load_array(array, names=['time', 'row', 'column', 'wavelength'])
@pytest.mark.parametrize('model', (ModelBase, IBIS8542Model))
def test_get_time_row_column(model, valid_kwargs):
# Initialise model
m = model(**valid_kwargs)
array = np.random.rand(4, 5, 6, 25)
names = ['time', 'row', 'column', 'wavelength']
m.load_array(array.copy(), names=names)
# (test get_spectra with multiple spectra -- only supports a single spectrum)
with pytest.raises(ValueError) as e:
m.get_spectra(spectrum=np.random.rand(10, 30))
assert 'explicit spectrum must have one dimension' in str(e.value)
# All dimensions loaded so make sure all dimensions required
for dim in names[:-1]:
with pytest.raises(ValueError) as e:
drop_a_name = names[:-1].copy()
drop_a_name.remove(dim)
m._get_time_row_column(**{k: 2 for k in drop_a_name})
assert f"{dim} index must be specified as multiple indices exist" == str(e.value)
# Drop a dimension and don't specify it
array = array[0] # Drop one (arbitrary) dimension
for dim in names[:-1]:
drop_a_name = names.copy()
drop_a_name.remove(dim)
m.load_array(array.copy(), names=drop_a_name)
m._get_time_row_column(**{k: 2 for k in drop_a_name[:-1]})
def test_modelbase_fit(valid_kwargs):
m = ModelBase(**valid_kwargs)
array = np.random.rand(4, 5, 6, 25)
array[0] = np.nan # For testing no spectra given
names = ['time', 'row', 'column', 'wavelength']
m.load_array(array.copy(), names=names)
# `_fit` method must be implemented in a subclass
with pytest.raises(NotImplementedError):
m._fit(np.random.rand(30))
# Must raise exception if no spectra presented for fitting
with pytest.raises(ValueError) as e:
m.fit(time=0, row=range(5), column=range(6))
assert 'no valid spectra given' in str(e.value)
def test_ibis8542model_validate_parameters(valid_kwargs):
stationary_line_core = valid_kwargs['stationary_line_core']
defaults = {
'absorption_guess': [-1000, stationary_line_core, 0.2, 0.1],
'emission_guess': [1000, stationary_line_core, 0.2, 0.1],
'absorption_min_bound': [-np.inf, stationary_line_core - 0.15, 1e-6, 1e-6],
'emission_min_bound': [0, -np.inf, 1e-6, 1e-6],
'absorption_max_bound': [0, stationary_line_core + 0.15, 1, 1],
'emission_max_bound': [np.inf, np.inf, 1, 1],
'absorption_x_scale': [1500, 0.2, 0.3, 0.5],
'emission_x_scale': [1500, 0.2, 0.3, 0.5]
}
# Incorrect Lengths
for key, value in defaults.items(): # For each parameter
with pytest.raises(ValueError) as e: # Catch ValueErrors
defaults_mod = defaults.copy() # Create a copy of the default parameters
defaults_mod.update({key: value[:-1]}) # Crop the parameter's value
IBIS8542Model(**valid_kwargs, **defaults_mod) # Pass the cropped parameter with the other default params
assert key in str(e.value) and 'length' in str(e.value) # Error must be about length of current parameter
# Check that the sign of the following amplitudes are enforced
for sign, bad_number, parameters in [('positive', -10.42, ('emission_guess', 'emission_min_bound')),
('negative', +10.42, ('absorption_guess', 'absorption_max_bound'))]:
for p in parameters:
with pytest.raises(ValueError) as e:
bad_value = defaults[p].copy()
bad_value[0] = bad_number
defaults_mod = defaults.copy()
defaults_mod.update({p: bad_value})
IBIS8542Model(**valid_kwargs, **defaults_mod)
assert p in str(e.value) and sign in str(e.value)
# TODO Verify remaining conditions
def test_ibis8542model_get_sigma():
# Enter the data directory (needed to find the files referenced in the config files)
original_dir = os.getcwd()
os.chdir(data_path())
m = IBIS8542Model(config="ibis8542model_config.yml")
sigma = np.loadtxt("ibis8542model_sigma.csv", delimiter=',')
assert np.array_equal(m._get_sigma(classification=0), sigma[0])
for c in (1, 2, 3, 4):
assert np.array_equal(m._get_sigma(classification=c), sigma[1])
for i in (0, 1):
assert np.array_equal(m._get_sigma(sigma=i), sigma[i])
x = np.array([1.4325, 1421.43, -1325.342, 153.3, 1.2, 433.0])
assert np.array_equal(m._get_sigma(sigma=x), x)
# Go back to original directory
os.chdir(original_dir)
@pytest.fixture(scope='module')
def ibis8542model_init():
# Enter the data directory (needed to find the files referenced in the config files)
original_dir = os.getcwd()
os.chdir(data_path())
x_orig = np.loadtxt("ibis8542model_wavelengths_original.csv")
m = IBIS8542Model(config="ibis8542model_init_config.yml", constant_wavelengths=x_orig, original_wavelengths=x_orig,
prefilter_response=np.ones(25))
m.neural_network = DummyClassifier(trained=True, n_features=25)
# Go back to original directory
os.chdir(original_dir)
return m
def test_ibis8542model_classify_spectra(ibis8542model_init):
np.random.seed(0)
spectra = np.random.rand(30, 25) * 100 + 1000
truth = np.array([10, 37, 72, 0, 27, 100, 81, 44, 17, 49, 22, 97, 28, 100, 22, 17,
64, 91, 94, 47, 0, 34, 96, 98, 97, 3, 82, 87, 40, 51], dtype=int)
m = ibis8542model_init
classifications = m.classify_spectra(spectra=spectra, only_normalise=True)
assert np.array_equal(classifications, truth)
for i in range(len(truth)):
classifications = m.classify_spectra(spectra=spectra[i], only_normalise=False)
assert classifications[0] == truth[i] and len(classifications) == 1
m.neural_network.trained = False
with pytest.raises(NotFittedError):
m.classify_spectra(spectra=spectra, only_normalise=True)
@pytest.fixture(scope='module')
def ibis8542model_spectra(ibis8542model_init):
"""IBIS8542Model with random data loaded"""
m = ibis8542model_init
spectra = np.empty((2, 3, 4, len(m.original_wavelengths)), dtype=np.float64)
classifications = np.empty((2, 3, 4), dtype=int)
np.random.seed(253)
a1_array = np.random.rand(*spectra.shape[:-1]) * 500 - 800
a2_array = np.random.rand(*spectra.shape[:-1]) * 500 + 300
b1_array = np.random.rand(*spectra.shape[:-1]) / 2 - 0.25 + m.stationary_line_core
b2_array = np.random.rand(*spectra.shape[:-1]) / 2 - 0.25 + m.stationary_line_core
s1_array = np.random.rand(*spectra.shape[:-1]) / 2 + 0.1
s2_array = np.random.rand(*spectra.shape[:-1]) / 2 + 0.1
g1_array = np.random.rand(*spectra.shape[:-1]) / 2 + 0.1
g2_array = np.random.rand(*spectra.shape[:-1]) / 2 + 0.1
d_array = np.random.rand(*spectra.shape[:-1]) * 600 + 700
a1_array[0, 1] = np.nan
a2_array[1, :2] = np.nan
for i in range(len(spectra)):
for j in range(len(spectra[0])):
for k in range(len(spectra[0, 0])):
if np.isnan(a1_array[i, j, k]):
classifications[i, j, k] = 4
spectra[i, j, k] = voigt(m.original_wavelengths, a2_array[i, j, k], b2_array[i, j, k],
s2_array[i, j, k], g2_array[i, j, k], d_array[i, j, k])
elif np.isnan(a2_array[i, j, k]):
classifications[i, j, k] = 0
spectra[i, j, k] = voigt(m.original_wavelengths, a1_array[i, j, k], b1_array[i, j, k],
s1_array[i, j, k], g1_array[i, j, k], d_array[i, j, k])
else:
classifications[i, j, k] = 1
spectra[i, j, k] = double_voigt(m.original_wavelengths, a1_array[i, j, k], a2_array[i, j, k],
b1_array[i, j, k], b2_array[i, j, k],
s1_array[i, j, k], s2_array[i, j, k],
g1_array[i, j, k], g2_array[i, j, k], d_array[i, j, k])
m.load_array(spectra, names=['time', 'row', 'column', 'wavelength'])
m.load_background(d_array, names=['time', 'row', 'column'])
return m, classifications
@pytest.fixture(scope='module')
def ibis8542model_results(ibis8542model_spectra):
# Load model with random spectra loaded
m, classifications = ibis8542model_spectra
# Test with explicit classifications
result = m.fit(time=range(2), row=range(3), column=range(4), classifications=classifications)
return result, m, classifications
def assert_results_equal(res1, res2):
for i in range(len(res1)):
assert res1[i].parameters == pytest.approx(res2[i].parameters, nan_ok=True)
assert res1[i].classification == res2[i].classification
assert res1[i].profile == res2[i].profile
assert res1[i].success == res2[i].success
assert np.array_equal(res1[i].index, res2[i].index)
def test_ibis8542model_wrong_length_of_classifications(ibis8542model_spectra):
# Load model with random spectra loaded
m, classifications = ibis8542model_spectra
assert classifications.shape == (2, 3, 4) # This is what the test needs to work (verifies fixture)
# Test with too few classifications
c1 = classifications[:, :, [0, 1]]
assert c1.shape == (2, 3, 2)
c2 = classifications[:, [0, 1]]
assert c2.shape == (2, 2, 4)
c3 = classifications[[1]]
assert c3.shape == (1, 3, 4)
c_wrong_shape = np.transpose(classifications) # ...but correct size so would not fail otherwise
assert c_wrong_shape.shape == (4, 3, 2)
for c in [c1, c2, c3, c_wrong_shape]:
with pytest.raises(ValueError) as e:
m.fit(time=range(2), row=range(3), column=range(4), classifications=c)
assert 'classifications do not match' in str(e.value)
# Test with too many classifications
for t, r, c in [
# (range(2), range(3), range(4)), # Correct values
(1, range(3), range(4)),
(range(2), range(1, 3), range(4)),
(range(2), range(3), range(2, 4)),
(range(2), range(3), 3),
(0, 1, 2),
]:
with pytest.raises(ValueError) as e:
m.fit(time=t, row=r, column=c, classifications=classifications)
assert 'classifications do not match' in str(e.value)
# Test with dimensions of length 1 removed (res1 and res2 should be equivalent)
c = classifications[:, np.newaxis, 0]
assert c.shape == (2, 1, 4)
res1 = m.fit(time=range(2), row=range(1), column=range(4), classifications=c)
c = classifications[:, 0]
assert c.shape == (2, 4)
res2 = m.fit(time=range(2), row=range(1), column=range(4), classifications=c)
assert len(res1) == len(res2) == 2 * 1 * 4
assert_results_equal(res1, res2)
@pytest.fixture()
def ibis8542model_resultsobjs(ibis8542model_results):
def fits2array(results):
results0 = FitResults((3, 4), 8, time=0)
results1 = FitResults((3, 4), 8, time=1)
for i in range(len(results)):
if results[i].index[0] == 0:
results0.append(results[i])
elif results[i].index[0] == 1:
results1.append(results[i])
else:
raise ValueError("invalid time")
return results0, results1
return fits2array
def test_ibis8542model_fit(ibis8542model_results, ibis8542model_resultsobjs):
# # METHOD 1: Test with explicit classifications
res1, m, classifications = ibis8542model_results
# # METHOD 2: Test with dummy classifier
m.neural_network = DummyClassifier(trained=True, classifications=classifications)
res2 = m.fit(time=range(2), row=range(3), column=range(4))
assert len(res1) == len(res2) == 2*3*4
assert_results_equal(res1, res2)
# # METHOD 3: Test over 4 processing pools
res3 = m.fit(time=range(2), row=range(3), column=range(4), n_pools=4)
# (n_pools must be an integer)
with pytest.raises(TypeError) as e:
m.fit(time=range(2), row=range(3), column=range(4), n_pools=float(4))
assert 'n_pools must be an integer' in str(e.value)
# # Test that FitResults objects can be created consistently for all of the methods
# Create the FitResults objects from the list of FitResult objects
results10, results11 = ibis8542model_resultsobjs(res1) # time : 0, 1
results20, results21 = ibis8542model_resultsobjs(res2)
results30, results31 = ibis8542model_resultsobjs(res3)
# Compare METHOD 1, METHOD 2, METHOD 3
for t0, t1 in [(results20, results21), (results30, results31)]: # For each alternative method (tn)
for results, tn in [(results10, t0), (results11, t1)]: # Compare to main method (results)
assert results.parameters == pytest.approx(tn.parameters, nan_ok=True)
assert np.array_equal(results.profile, tn.profile)
import numpy as np
from scipy import interpolate
import pdb
import tqdm
def _estim_dist_old(quantiles, percentiles, y_min, y_max, smooth_tails, tau):
""" Estimate CDF from list of quantiles, with smoothing """
noise = np.random.uniform(low=0.0, high=1e-8, size=((len(quantiles),)))
noise_monotone = np.sort(noise)
quantiles = quantiles + noise_monotone
# Smooth tails
def interp1d(x, y, a, b):
return interpolate.interp1d(x, y, bounds_error=False, fill_value=(a, b), assume_sorted=True)
cdf = interp1d(quantiles, percentiles, 0.0, 1.0)
inv_cdf = interp1d(percentiles, quantiles, y_min, y_max)
if smooth_tails:
# Uniform smoothing of tails
quantiles_smooth = quantiles.copy()  # copy so tail smoothing does not mutate the input quantiles
tau_lo = tau
tau_hi = 1-tau
q_lo = inv_cdf(tau_lo)
q_hi = inv_cdf(tau_hi)
idx_lo = np.where(percentiles < tau_lo)[0]
idx_hi = np.where(percentiles > tau_hi)[0]
if len(idx_lo) > 0:
quantiles_smooth[idx_lo] = np.linspace(quantiles[0], q_lo, num=len(idx_lo))
if len(idx_hi) > 0:
quantiles_smooth[idx_hi] = np.linspace(q_hi, quantiles[-1], num=len(idx_hi))
cdf = interp1d(quantiles_smooth, percentiles, 0.0, 1.0)
inv_cdf = interp1d(percentiles, quantiles_smooth, y_min, y_max)
return cdf, inv_cdf
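# Usage sketch (assumed, not part of the original module): recover a CDF and
# inverse CDF from a grid of estimated quantiles; the toy quantiles below come
# from a standard normal sample.
def _example_estim_dist_usage():
    percentiles = np.linspace(0.01, 0.99, 99)
    quantiles = np.quantile(np.random.randn(10000), percentiles)
    cdf, inv_cdf = _estim_dist_old(quantiles, percentiles, y_min=-5.0, y_max=5.0,
                                   smooth_tails=True, tau=0.05)
    return cdf(0.0), inv_cdf(0.5)  # roughly 0.5 and 0.0 for a standard normal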
def _estim_dist(quantiles, percentiles, y_min, y_max, smooth_tails, tau):
""" Estimate CDF from list of quantiles, with smoothing """
noise = np.random.uniform(low=0.0, high=1e-5, size=((len(quantiles),)))
noise_monotone = np.sort(noise)
quantiles = quantiles + noise_monotone
# Smooth tails
def interp1d(x, y, a, b):
return interpolate.interp1d(x, y, bounds_error=False, fill_value=(a, b), assume_sorted=True)
cdf = interp1d(quantiles, percentiles, 0.0, 1.0)
inv_cdf = interp1d(percentiles, quantiles, y_min, y_max)
if smooth_tails:
# Uniform smoothing of tails
quantiles_smooth = quantiles.copy()  # copy so tail smoothing does not mutate the input quantiles
tau_lo = tau
tau_hi = 1-tau
q_lo = inv_cdf(tau_lo)
q_hi = inv_cdf(tau_hi)
idx_lo = np.where(percentiles < tau_lo)[0]
idx_hi = np.where(percentiles > tau_hi)[0]
if len(idx_lo) > 0:
quantiles_smooth[idx_lo] = np.linspace(quantiles[0], q_lo, num=len(idx_lo))
if len(idx_hi) > 0:
quantiles_smooth[idx_hi] = np.linspace(q_hi, quantiles[-1], num=len(idx_hi))
cdf = interp1d(quantiles_smooth, percentiles, 0.0, 1.0)
inv_cdf = interp1d(percentiles, quantiles_smooth, y_min, y_max)
# Standardize
breaks = np.linspace(y_min, y_max, num=1000, endpoint=True)
cdf_hat = cdf(breaks)
f_hat = np.diff(cdf_hat)
from sklearn.metrics import coverage_error, accuracy_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import hamming_loss
from sklearn import metrics
from collections import Counter
import math
import numpy as np
def patk(predictions, labels):
pak = np.zeros(3)
K = np.array([1, 3, 5])
for i in range(predictions.shape[0]):
pos = np.argsort(-predictions[i, :])
y = labels[i, :]
y = y[pos]
for j in range(3):
k = K[j]
pak[j] += (np.sum(y[:k]) / k)
pak = pak / predictions.shape[0]
return pak
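# Usage sketch (assumed): precision@{1,3,5} for a single made-up ranking. The
# two relevant labels are ranked 1st and 2nd, so P@1=1, P@3=2/3, P@5=2/5.
def _example_patk_usage():
    predictions = np.array([[0.9, 0.1, 0.8, 0.2, 0.05]])
    labels = np.array([[1, 0, 1, 0, 0]])
    return patk(predictions, labels)  # -> array([1., 0.667, 0.4])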
def cm_precision_recall(prediction, truth):
"""Evaluate confusion matrix, precision and recall for given set of labels and predictions
Args
prediction: a vector with predictions
truth: a vector with class labels
Returns:
cm: confusion matrix
precision: precision score
recall: recall score"""
confusion_matrix = Counter()
positives = [1]
binary_truth = [x in positives for x in truth]
binary_prediction = [x in positives for x in prediction]
for t, p in zip(binary_truth, binary_prediction):
confusion_matrix[t, p] += 1
cm = np.array([confusion_matrix[True, True], confusion_matrix[False, False], confusion_matrix[False, True],
confusion_matrix[True, False]])
# print cm
precision = (cm[0] / (cm[0] + cm[2] + 0.000001))
recall = (cm[0] / (cm[0] + cm[3] + 0.000001))
return cm, precision, recall
def bipartition_scores(labels, predictions):
""" Computes bipartitation metrics for a given multilabel predictions and labels
Args:
logits: Logits tensor, float - [batch_size, NUM_LABELS].
labels: Labels tensor, int32 - [batch_size, NUM_LABELS].
Returns:
bipartiation: an array with micro_precision, micro_recall, micro_f1,macro_precision, macro_recall, macro_f1"""
sum_cm = np.zeros((4))
macro_precision = 0
macro_recall = 0
for i in range(labels.shape[1]):
truth = labels[:, i]
prediction = predictions[:, i]
cm, precision, recall = cm_precision_recall(prediction, truth)
sum_cm += cm
macro_precision += precision
macro_recall += recall
macro_precision = macro_precision / labels.shape[1]
macro_recall = macro_recall / labels.shape[1]
# print(macro_recall, macro_precision)
macro_f1 = 2 * (macro_precision) * (macro_recall) / (macro_precision + macro_recall + 0.000001)
micro_precision = sum_cm[0] / (sum_cm[0] + sum_cm[2] + 0.000001)
micro_recall = sum_cm[0] / (sum_cm[0] + sum_cm[3] + 0.000001)
micro_f1 = 2 * (micro_precision) * (micro_recall) / (micro_precision + micro_recall + 0.000001)
bipartiation = np.asarray([micro_precision, micro_recall, micro_f1, macro_precision, macro_recall, macro_f1])
return bipartiation
def BAE(labels, predictions):
abs_error = (1 - predictions) * labels # consider error only for true classes
freq = np.sum(labels, axis=0) + 1e-15 # count the frequency of each label
num_labels = np.shape(labels)[1]
bae = np.sum(np.sum(abs_error, axis=0) / freq) / num_labels  # assumed completion: frequency-balanced mean absolute error per label
return bae
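# Usage sketch (assumed): micro/macro bipartition metrics on a toy 3-label,
# 2-sample problem; the third label is missed by the predictions.
def _example_bipartition_usage():
    labels = np.array([[1, 0, 1], [0, 1, 0]])
    predictions = np.array([[1, 0, 0], [0, 1, 0]])
    return bipartition_scores(labels, predictions)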
import sentencepiece as spm
import re
import numpy as np
from collections import OrderedDict
from typing import List
from tensorlayerx import logging
class Trie:
"""
Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass
Loose reference https://en.wikipedia.org/wiki/Trie
"""
def __init__(self):
self.data = {}
def add(self, word: str):
"""
Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation.
The special key `""` is used to represent termination.
This function is idempotent, adding twice the same word will leave the trie unchanged
Example::
>>> trie = Trie()
>>> trie.add("Hello 友達")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
>>> trie.add("Hello")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
"""
if not word:
# Prevent empty string
return
ref = self.data
for char in word:
ref[char] = char in ref and ref[char] or {}
ref = ref[char]
ref[""] = 1
def split(self, text: str) -> List[str]:
"""
Will look for the words added to the trie within `text`. Output is the original string split along the
boundaries of the words found.
This trie will match the longest possible word first !
Example::
>>> trie = Trie()
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS] This is a extra_id_100"]
>>> trie.add("[CLS]")
>>> trie.add("extra_id_1")
>>> trie.add("extra_id_100")
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS]", " This is a ", "extra_id_100"]
"""
# indexes are counted left of the chars index.
# "hello", index 0, is left of h, index 1 is between h and e.
# index 5 is right of the "o".
# States are going to capture every possible start (indexes as above)
# as keys, and have as values, a pointer to the position in the trie
# where we're at. This is a partial match for now.
# This enables to keep track of multiple matches while we're iterating
# the string
# If the trie contains, "blowing", and "lower" and we encounter the
# string "blower", we need to split into ["b", "lower"].
# This is where we need to keep track of multiple possible starts.
states = OrderedDict()
# This will contain every indices where we need
# to cut.
# We force to cut at offset 0 and len(text) (added later)
offsets = [0]
# This is used by the lookahead which needs to skip over
# some text where the full match exceeded the place in the initial
# for loop
skip = None
# Main loop, Giving this algorithm O(n) complexity
for current, current_char in enumerate(text):
if skip and current < skip:
# Prevents the lookahead for matching twice
# like extra_id_100 and id_100
continue
# This will track every state
# that stop matching, we need to stop tracking them.
# If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
# fail on "b", we need to remove 0 from the valid states.
to_remove = set()
# Whenever we found a match, we need to drop everything
# this is a greedy algorithm, it will match on the first found token
reset = False
# In this case, we already have partial matches (But unfinished)
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
# Lookahead to match longest first
# Important in case of extra_id_1 vs extra_id_100
# Here we are also actively looking for other earlier partial
# matches
# "[CLS]", "L", we need to match CLS even if L is special
for lookstart, looktrie_pointer in states.items():
if lookstart > start:
# This partial match is later, we can stop looking
break
elif lookstart < start:
# This partial match is earlier, the trie pointer
# was already updated, so index is + 1
lookahead_index = current + 1
end = current + 1
else:
# Here lookstart == start and
# looktrie_pointer == trie_pointer
# It wasn't updated yet so indices are current ones
lookahead_index = current
end = current
next_char = text[lookahead_index] if lookahead_index < len(text) else None
while next_char in looktrie_pointer:
looktrie_pointer = looktrie_pointer[next_char]
lookahead_index += 1
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
if lookahead_index == len(text):
# End of string
break
next_char = text[lookahead_index]
# End lookahead
# Storing and resetting
offsets.append(start)
offsets.append(end)
reset = True
break
elif current_char in trie_pointer:
# The current character being looked at has a match within the trie
# update the pointer (it will be stored back into states later).
trie_pointer = trie_pointer[current_char]
# Storing back the new pointer into the states.
# Partial matches got longer by one.
states[start] = trie_pointer
else:
# The new character has not match in the trie, we need
# to stop keeping track of this partial match.
# We can't do it directly within the loop because of how
# python iteration works
to_remove.add(start)
# Either clearing the full start (we found a real match)
# Or clearing only the partial matches that didn't work.
if reset:
states = {}
else:
for start in to_remove:
del states[start]
# If this character is a starting character within the trie
# start keeping track of this partial match.
if current_char in self.data:
states[current] = self.data[current_char]
# We have a cut at the end with states.
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
end = len(text)
offsets.append(start)
offsets.append(end)
# Longest cut is always the one with lower start so the first
# item so we need to break.
break
return self.cut_text(text, offsets)
def cut_text(self, text, offsets):
# We have all the offsets now, we just need to do the actual splitting.
# We need to eventually add the first part of the string and the eventual
# last part.
offsets.append(len(text))
tokens = []
start = 0
for end in offsets:
if start > end:
logging.error(
"There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
)
continue
elif start == end:
# This might happen if there's a match at index 0
# we're also preventing zero-width cuts in case of two
# consecutive matches
continue
tokens.append(text[start:end])
start = end
return tokens
class T5Transform(object):
def __init__(
self,
vocab_file=None,
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
extra_ids=100,
prefix="translate English to French: ",
next_prefix=None,
task="text",
source_max_length=512,
label_max_length=512,
**kwargs
):
self.vocab_file = vocab_file
self.eos_token = eos_token
self.unk_token = unk_token
self.pad_token = pad_token
self.extra_ids = extra_ids
self.source_max_length = source_max_length
self.prefix = prefix
self.task = task
self.next_prefix = next_prefix
self.label_max_length = label_max_length
if self.extra_ids > 0:
self.additional_special_tokens = [f"<extra_id_{i}>" for i in range(self.extra_ids)]
if self.eos_token is not None:
self.additional_special_tokens.append(self.eos_token)
if self.unk_token is not None:
self.additional_special_tokens.append(self.unk_token)
if self.pad_token is not None:
self.additional_special_tokens.append(self.pad_token)
self._create_trie(self.additional_special_tokens)
if self.vocab_file is None:
raise ValueError(f"vocab file is None.")
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
self.is_train = True
super(T5Transform, self).__init__(**kwargs)
def set_train(self):
self.is_train = True
def set_eval(self):
self.is_train = False
@property
def vocab_size(self):
return self.sp_model.get_piece_size() + self.extra_ids
def _create_trie(self, unique_no_split_tokens):
trie = Trie()
for token in unique_no_split_tokens:
if hasattr(self, "do_lower_case") and self.do_lower_case and token not in self.all_special_tokens:
trie.add(token.lower())
else:
trie.add(token)
self.tokens_trie = trie
def _tokenize(self, token):
return self.sp_model.encode(token, out_type=str)
def tokenize(self, text):
no_split_token = set(self.additional_special_tokens)
tokens = self.tokens_trie.split(text)
# ["This is something", "<special_token_1>", " else"]
for i, token in enumerate(tokens):
if token in no_split_token:
left = tokens[i - 1] if i > 0 else None
right = tokens[i + 1] if i < len(tokens) - 1 else None
# We strip left and right by default
if right:
tokens[i + 1] = right.lstrip()
if left:
tokens[i - 1] = left.rstrip()
# ["This is something", "<special_token_1>", "else"]
tokenized_text = []
for token in tokens:
# Need to skip eventual empty (fully stripped) tokens
if not token:
continue
if token in no_split_token:
tokenized_text.append(token)
else:
tokenized_text.extend(self._tokenize(token))
# ["This", " is", " something", "<special_token_1>", "else"]
return tokenized_text
def convert_tokens_to_ids(self, tokens):
if tokens is None:
return None
if isinstance(tokens, str):
return self._convert_token_to_id_with_added_voc(tokens)
ids = []
for token in tokens:
ids.append(self._convert_token_to_id_with_added_voc(token))
return ids
def _convert_token_to_id_with_added_voc(self, token):
if token is None:
return None
return self._convert_token_to_id(token)
def _convert_token_to_id(self, token):
"""Converts a token (str) in an id using the vocab."""
if token.startswith("<extra_id_"):
match = re.match(r"<extra_id_(\d+)>", token)
num = int(match.group(1))
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(token)
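# Worked example (assumed numbers): with a SentencePiece model of size V and
# extra_ids=100, vocab_size = V + 100, so "<extra_id_0>" maps to id V + 99 and
# "<extra_id_99>" maps to id V -- the sentinels fill the top of the vocabulary,
# mirroring the inverse mapping in _convert_id_to_token below.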
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index < self.sp_model.get_piece_size():
token = self.sp_model.IdToPiece(index)
else:
token = f"<extra_id_{self.vocab_size - 1 - index}>"
return token
def convert_tokens_to_string(self, tokens, remove_special_token=False):
"""Converts a sequence of tokens (string) in a single string."""
current_sub_tokens = []
out_string = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.additional_special_tokens:
if not remove_special_token:
out_string += self.sp_model.decode_pieces(current_sub_tokens) + token + " "
current_sub_tokens = []
else:
current_sub_tokens.append(token)
out_string += self.sp_model.decode_pieces(current_sub_tokens)
return out_string.strip()
def ids_to_string(self, ids, remove_special_token=True):
tokens = [self._convert_id_to_token(int(index)) for index in ids if index >= 0]
return self.convert_tokens_to_string(tokens, remove_special_token=remove_special_token)
def string_to_ids(self, text, max_length=None):
if isinstance(text, list):
input_ids = []
attention_masks = []
for i in text:
input_id, attention_mask = self.string_to_ids(i, max_length=max_length)
input_ids.append(input_id)
attention_masks.append(attention_mask)
return {"inputs": np.array(input_ids), "attention_mask": np.array(attention_masks)}
tokens = self.tokenize(text)
if max_length is None:
tokens = tokens + [self.eos_token]
attention_mask = [1] * len(tokens)
else:
if not isinstance(max_length, int):
raise ValueError(f"{max_length} is not int.")
else:
tokens_length = len(tokens)
if tokens_length >= (max_length - 1):
tokens = tokens[:max_length - 1] + [self.eos_token]
attention_mask = [1] * len(tokens)
else:
attention_mask = [1] * (len(tokens) + 1) + [0] * (max_length - tokens_length - 1)
tokens = tokens + [self.eos_token] + [self.pad_token] * (
max_length - tokens_length - 1)
ids = self.convert_tokens_to_ids(tokens)
return {"inputs": np.array(ids), "attention_mask": np.array(attention_mask)}
def process_token(self, text, label, source_max_length=None):
ids = []
labels = []
for token, l in zip(text, label):
token = self.tokenize(token)
id = self.convert_tokens_to_ids(token)
l = [l] * len(id)
ids += id
labels += l
if source_max_length is None:
ids = ids + self.convert_tokens_to_ids([self.eos_token])
attention_mask = [1] * len(ids)
labels = labels + [-100]
else:
if not isinstance(source_max_length, int):
raise ValueError(f"{source_max_length} is not int.")
else:
ids_length = len(ids)
if ids_length >= (source_max_length - 1):
ids = ids[:source_max_length - 1] + self.convert_tokens_to_ids([self.eos_token])
attention_mask = [1] * len(ids)
labels = labels[:source_max_length - 1] + [-100]
else:
attention_mask = [1] * (len(ids) + 1) + [0] * (source_max_length - ids_length - 1)
ids = ids + self.convert_tokens_to_ids([self.eos_token]) + self.convert_tokens_to_ids(
[self.pad_token]) * (source_max_length - ids_length - 1)
labels = labels + [-100] * (source_max_length - ids_length)
return {"inputs": np.array(ids), "attention_mask": np.array(attention_mask), "labels": np.array(labels)}, \
{"labels": | np.array(labels) | numpy.array |
import unittest
from ..utils import spectrogram
from ..trials import slice
from ..nodes import TFC
from ..dataset import DataSet
import numpy as np
FS = 256.
class TestTFC(unittest.TestCase):
def setUp(self):
data = np.array([np.sin(i * 4 * 60 * np.linspace(0, np.pi * 2, 60 * FS))
for i in range(16)])
labels = np.zeros(data.shape[1], dtype=int)  # np.int is deprecated in modern NumPy
labels[[1000, 2000, 3000, 4000]] = 1
ids = np.arange(data.shape[1]) / FS
self.d = slice(DataSet(data, labels, ids), {1:'fake'}, [-512, 512])
def test_setup(self):
d = self.d
self.assertEqual(d.feat_shape, (16, 1024))
self.assertEqual(d.nclasses, 1)
self.assertEqual(d.ninstances, 4)
def test_tfc(self):
d = self.d
w_size, w_step = 64, 32
tfc = TFC(w_size, w_step)
tfc.train(d)
td = tfc.apply(d)
nwindows = int(np.floor((d.feat_shape[1] - w_size + w_step) /
float(w_step)))
self.assertEqual(td.feat_shape, (d.feat_shape[0], nwindows, w_size // 2 + 1))
self.assertEqual(td.nclasses, d.nclasses)
self.assertEqual(td.ninstances, d.ninstances)
for ci in range(td.feat_shape[0]):
a = td.data[ci,:,:,0]
b = spectrogram(d.data[ci,:,0], w_size, w_step)
np.testing.assert_equal(a, b)
import numpy as np
from scipy.interpolate import InterpolatedUnivariateSpline
import os,os.path
import re
from numpy.lib.recfunctions import append_fields
from . import localpath
class SN1a_feedback(object):
def __init__(self):
"""
this is the object that holds the feedback table for SN1a
.masses gives a list of masses
.metallicities gives a list of possible yield metallicities
.elements gives the elements considered in the yield table
.table gives a dictionary where the yield table for a specific metallicity can be queried
.table[0.02] gives a yield table.
Keys of this object are ['Mass','mass_in_remnants','elements']
Mass is in units of Msun
'mass_in_remnants' in units of Msun but with a '-'
'elements' yield in Msun normalised to Mass. i.e. integral over all elements is unity
"""
def Seitenzahl(self):
"""
Seitenzahl 2013 from Ivo txt
"""
y = np.genfromtxt(localpath + 'input/yields/Seitenzahl2013/0.02.txt', names = True, dtype = None)
self.metallicities = list([0.02])
self.masses = list([1.4004633930489443])
names = list(y.dtype.names)
self.elements = names[2:]
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Thielemann(self):
"""
Thielemann 2003 yields as compiled in Travaglio 2004
"""
y = np.genfromtxt(localpath + 'input/yields/Thielemann2003/0.02.txt', names = True, dtype = None)
metallicity_list = [0.02]
self.metallicities = metallicity_list
self.masses = [1.37409]
names = y.dtype.names
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
for name in names:
if name in ['Mass','mass_in_remnants']:
yield_tables_final_structure_subtable[name] = y[name]
else:
yield_tables_final_structure_subtable[name] = np.divide(y[name],self.masses)
self.elements = list(y.dtype.names[2:])
yield_tables_final_structure = {}
yield_tables_final_structure[0.02] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
def Iwamoto(self):
'''
Iwamoto99 yields building up on Nomoto84
'''
import numpy.lib.recfunctions as rcfuncs
tdtype = [('species1','|S4'),('W7',float),('W70',float),('WDD1',float),('WDD2',float),('WDD3',float),('CDD1',float),('CDD2',float)]
metallicity_list = [0.02,0.0]
self.metallicities = metallicity_list
self.masses = [1.38]
y = np.genfromtxt(localpath + 'input/yields/Iwamoto/sn1a_yields.txt',dtype = tdtype, names = None)
## Python3 need transformation between bytes and strings
element_list2 = []
for j,jtem in enumerate(y['species1']):
element_list2.append(jtem.decode('utf8'))
y = rcfuncs.append_fields(y,'species',element_list2,usemask = False)
################################
without_radioactive_isotopes=True
if without_radioactive_isotopes:### without radioactive isotopes it should be used this way because the radioactive nuclides are already calculated in here
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne']#,'22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg']#,'26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar']#, '36Cl']
potassium_list = ['39K','41K']#, '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca']#, '40K']
scandium_list = ['45Sc']#,'44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti']#,'48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr']#,'53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe']#,'56Co','57Co']
cobalt_list = ['59Co']#,'60Fe','56Ni','57Ni','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni']#,'60Co']
copper_list = ['63Cu','65Cu']#,'63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
##### with radioactive isotopes (unclear weather they are double, probably not but remnant mass is too big)
else:
carbon_list = ['12C','13C']
nitrogen_list = ['14N','15N']
oxygen_list = ['16O','17O','18O']
fluorin_list = ['19F']
neon_list = ['20Ne','21Ne','22Ne','22Na']
sodium_list = ['23Na']
magnesium_list = ['24Mg','25Mg','26Mg','26Al']
aluminium_list = ['27Al']
silicon_list = ['28Si','29Si','30Si']
phosphorus_list = ['31P']
sulfur_list = ['32S','33S','34S','36S']
chlorine_list = ['35Cl','37Cl']
argon_list = ['36Ar','38Ar','40Ar', '36Cl']
potassium_list = ['39K','41K', '39Ar', '41Ca']
calcium_list = ['40Ca','42Ca','43Ca','44Ca','46Ca','48Ca', '40K']
scandium_list = ['45Sc','44Ti']
titanium_list = ['46Ti','47Ti','48Ti','49Ti','50Ti','48V','49V']
vanadium_list = ['50V','51V']
chromium_list = ['50Cr','52Cr','53Cr','54Cr','53Mn']
manganese_list = ['55Mn']
iron_list = ['54Fe', '56Fe','57Fe','58Fe','56Co','57Co','56Ni','57Ni']
cobalt_list = ['59Co','60Fe','59Ni']
nickel_list = ['58Ni','60Ni','61Ni','62Ni','64Ni','60Co']
copper_list = ['63Cu','65Cu','63Ni']
zinc_list = ['64Zn','66Zn','67Zn','68Zn']
indexing = {}
indexing['C'] = carbon_list
indexing['N'] = nitrogen_list
indexing['O'] = oxygen_list
indexing['F'] = fluorin_list
indexing['Ne'] = neon_list
indexing['Na'] = sodium_list
indexing['Mg'] = magnesium_list
indexing['Al'] = aluminium_list
indexing['Si'] = silicon_list
indexing['P'] = phosphorus_list
indexing['S'] = sulfur_list
indexing['Cl'] = chlorine_list
indexing['Ar'] = argon_list
indexing['K'] = potassium_list
indexing['Ca'] = calcium_list
indexing['Sc'] = scandium_list
indexing['Ti'] = titanium_list
indexing['V'] = vanadium_list
indexing['Cr'] = chromium_list
indexing['Mn'] = manganese_list
indexing['Fe'] = iron_list
indexing['Co'] = cobalt_list
indexing['Ni'] = nickel_list
indexing['Cu'] = copper_list
indexing['Zn'] = zinc_list
self.elements = list(indexing.keys())
#################################
yield_tables_final_structure = {}
for metallicity_index,metallicity in enumerate(metallicity_list[:]):
if metallicity == 0.02:
model = 'W7'
elif metallicity == 0.0:
model = 'W70'
else:
print('this metallicity is not represented in the Iwamoto yields. They only have solar (0.02) and zero (0.0)')
additional_keys = ['Mass', 'mass_in_remnants']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = self.masses[0]
total_mass = []
for i,item in enumerate(self.elements):
for j,jtem in enumerate(indexing[item]):
cut = np.where(y['species']==jtem)
yield_tables_final_structure_subtable[item] += y[model][cut]
total_mass.append(y[model][cut])
yield_tables_final_structure_subtable['mass_in_remnants'] = -sum(total_mass)
for i,item in enumerate(self.elements):
yield_tables_final_structure_subtable[item] = np.divide(yield_tables_final_structure_subtable[item],-yield_tables_final_structure_subtable['mass_in_remnants'])
yield_tables_final_structure[metallicity] = yield_tables_final_structure_subtable
self.table = yield_tables_final_structure
class SN2_feedback(object):
def __init__(self):
"""
This is the object that holds the feedback table for CC-SN.
Different tables can be loaded by the methods.
"""
def Portinari(self):
'''
Loading the yield table from Portinari1998.
'''
self.metallicities = [0.0004,0.004,0.008,0.02,0.05]
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/0.02.txt',names=True)
self.masses = list(x['Mass'])
self.elements = list(x.dtype.names[3:])
yield_tables_final_structure = {}
for metallicity in self.metallicities:
additional_keys = ['Mass', 'mass_in_remnants','unprocessed_mass_in_winds']
names = additional_keys + self.elements
base = np.zeros(len(self.masses))
list_of_arrays = []
for i in range(len(names)):
list_of_arrays.append(base)
yield_tables_final_structure_subtable = np.core.records.fromarrays(list_of_arrays,names=names)
yield_tables_final_structure_subtable['Mass'] = np.array(self.masses)
x = np.genfromtxt(localpath + 'input/yields/Portinari_1998/%s.txt' % (metallicity), names=True)
"""
Classes to simplify and standardize the process of drawing samples from the posterior distribution in
Bayesian inference problems.
"""
import numpy as np
import scipy as sp
class Quad_Sampler(object):
"""
Class for drawing samples from an arbitrary one-dimensional probability distribution using numerical integration
and interpolation. In general this will be superior to more sophisticated sampling methods for 1D problems.
Assumes that priors are uniform.
Args:
ln_likelihood: Function which takes the independent variable x as its first argument and returns the log
of the likelihood function, p(d|x,I), up to a constant. May take other *args or **kwargs.
priors: List-type of the form [a,b], where a and b define the upper and lower bounds of the uniform
prior p(x|I).
Optional:
vect: (bool) Set to true if the log-likelihood accepts a vectorized input.
"""
def __init__(self, ln_likelihood, priors, vect=False):
self._ln_likelihood = ln_likelihood
self._a, self._b = priors
self._vect = vect
# Default values
self.ln_Z = np.nan
self.mean = np.nan
self.std = np.nan
def fit(self, n_pts=200, args=(), **kwargs):
"""
Perform the fit.
Optional:
n_pts: (int) Number of evenly-spaced points over which to compute the probability.
args: (tuple) All additional arguments to be passed on to the likelihood function.
**kwargs: All other keywords are passed on to the likelihood function.
"""
# Evaluate the pdf
self.xs = np.linspace(self._a, self._b, num=n_pts)
if self._vect:
self.ln_pdf = self._ln_likelihood(self.xs, *args, **kwargs)
else:
self.ln_pdf = np.array([self._ln_likelihood(x, *args, **kwargs) for x in self.xs])
# Rescale with the maxima
ln_C = np.amax(self.ln_pdf)
pdf_scaled = np.exp(self.ln_pdf - ln_C)
# Compute the evidence and rescale
Z_scaled = np.trapz(pdf_scaled, x=self.xs)
self.ln_Z = np.log(Z_scaled) + ln_C
self.pdf = pdf_scaled / Z_scaled
self.cdf = sp.integrate.cumtrapz(self.pdf, x=self.xs, initial=0)
# Estimate summary statistics - assuming a normal distribution
samples = self.get_samples(1000)
self.mean = np.mean(samples)
self.std = np.std(samples)
def get_samples(self, n_samples):
"""
"""
u_samp = np.random.rand(n_samples)
return np.interp(u_samp, self.cdf, self.xs)
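# Usage sketch (assumed): a unit-variance Gaussian likelihood with a uniform
# prior on the mean; the data values and prior bounds are invented for illustration.
def _example_quad_sampler_usage():
    data = np.array([0.8, 1.1, 0.9, 1.3])
    def ln_like(mu):
        return -0.5 * np.sum((data - mu) ** 2)  # log N(data | mu, 1) up to a constant
    sampler = Quad_Sampler(ln_like, priors=[-5.0, 5.0])
    sampler.fit(n_pts=500)
    return sampler.mean, sampler.std, sampler.get_samples(100)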
class Quad_Sampler_ND(object):
"""
Class for drawing samples from an arbitrary N-dimensional probability distribution using numerical integration
and interpolation. This can be useful for problems with a low number of dimensions (~3) for which the likelihood
function can be computed quickly (<< 1 second).
Assumes that priors are uniform. Currently does not support vectorized likelihoods.
Args:
ln_likelihood: Function which takes the independent variables (x1, x2, ..., xN) as its first argument and returns
the log of the likelihood function, p(d|x1,...,I), up to a constant. May take other *args or **kwargs.
priors: List of tuples, of the form [(a1,b1), (a2,b2), ..., (aN,bN)] where a and b define the upper and lower bounds
of the uniform prior p(x1,...|I).
"""
def __init__(self, ln_likelihood, ndim, priors):
self._ln_likelihood = ln_likelihood
self.ndim = ndim
self._a = np.zeros(self.ndim)
self._b = np.zeros(self.ndim)
for n in range(self.ndim):
self._a[n], self._b[n] = priors[n]
# Default values
self.ln_Z = np.nan
self.mean = np.nan
self.std = np.nan
def fit(self, n_pts=200, args=(), **kwargs):
"""
Perform the fit.
Optional:
n_pts: (int) Number of evenly-spaced points over which to compute the probability.
args: (tuple) All additional arguments to be passed on to the likelihood function.
**kwargs: All other keywords are passed on to the likelihood function.
This doesn't work yet.
"""
# Construct the evaluation grid
self.xs = np.zeros([self.ndim, n_pts])
for n in range(self.ndim):
self.xs[n,:] = np.linspace(self._a[n], self._b[n], num=n_pts)
# Evaluate the pdf
self.ln_pdf = np.zeros([self.ndim, n_pts])
for n in range(self.ndim):
self.ln_pdf[n] = np.array([self._ln_likelihood(x, *args, **kwargs) for x in self.xs[n]])
# Rescale with the maxima
ln_C = np.amax(self.ln_pdf)
pdf_scaled = np.exp(self.ln_pdf - ln_C)
# Compute the evidence and rescale
Z_scaled = np.trapz(pdf_scaled, x=self.xs)
self.ln_Z = np.log(Z_scaled) + ln_C
self.pdf = pdf_scaled / Z_scaled
self.cdf = sp.integrate.cumtrapz(self.pdf, x=self.xs, initial=0)
# Estimate summary statistics - assuming a normal distribution
samples = self.get_samples(1000)
self.mean = np.mean(samples)
self.std = np.std(samples)
def get_samples(self, n_samples):
"""
"""
u_samp = np.random.rand(n_samples)
return np.interp(u_samp, self.cdf, self.xs)
# ------------------------------------------------ MESXR Sampler ------------------------------------------------
import time
import emcee
import mst_ida.models.mesxr3 as m3
import mst_ida.data.mesxr as mesxr
import mst_ida.analysis.ida as ida
import mst_ida.analysis.emissivity as em
import mst_ida.models.base.response as rsp
from mst_ida.utilities.functions import identify_outliers
from mst_ida.models.base.geometry import flux_coords, sunflower_points
default_priors = {
'alpha':((10, 14), (0.1,18), (0.1,18))
}
class MESXR_Emiss_Sampler(object):
"""
"""
def __init__(self, shot, frame, flux=None, Ec_ref=3.0, priors=None, indices=np.arange(5,55), Ew=300.,
method='alpha', nwalkers=32, center=True, delta_a=0.06, delta_h=0.01, manual=None):
# Load the data
self.shot = shot
if manual is not None:
self.mesxr_data = manual['data']
self.mesxr_sigmas = manual['sigmas']
self.signed_ps = manual['impact_p']
self.thresholds = manual['thresholds']
else:
self.frame = frame
self.mesxr_data, self.mesxr_sigmas, self.signed_ps, self.thresholds = mesxr.get_8c_data(self.shot, self.frame, center=center)
# Model and geometry
if flux is None:
self.flux = flux_coords(delta_a=delta_a, delta_h=delta_h)
else:
self.flux = flux
self.method = method
self.p3det = m3.MESXR(shot=self.shot, center=center)
self.gij_set = {}
self.ss_set = {}
for Ec in self.thresholds:
self.gij_set[Ec], self.ss_set[Ec] = em.get_geometry_matrix(self.flux, self.p3det)
# Include specified data points
self.indices = np.arange(6,55)
z = {Ec:np.maximum(self.mesxr_data[Ec][self.indices]+1, 1) for Ec in self.thresholds}
self.ln_data_fact = {Ec:-np.sum(sp.special.loggamma(z[Ec])) for Ec in self.thresholds}
# Set up the priors
if priors is None:
self.priors = default_priors[self.method]
else:
self.priors = priors
# Sampler parameters
self.nwalkers = nwalkers
self.pos0 = self.get_pos0()
self.ndim = self.pos0.shape[1]
# Set up the samplers
moves = [(emcee.moves.DEMove(), 0.8), (emcee.moves.DESnookerMove(), 0.2),]
self.samplers = {}
for index, Ec in enumerate(self.thresholds):
self.samplers[Ec] = emcee.EnsembleSampler(self.nwalkers, self.ndim, self.ln_prob, moves=moves, kwargs={'Ec':Ec})
# Set up ratio curves
self.Ew = Ew
self.Ec_ref = Ec_ref
self.temps = np.linspace(10, 3000, num=6000)
self.ratios= {}
for Ec in self.thresholds:
if Ec != self.Ec_ref:
self.ratios[Ec] = np.array([self.model_ratio(Te,Ec*1000.) for Te in self.temps])
def fit(self, nsteps=10000, remove_outliers=True, resume=False, burn_step=3000, n_samples=5000, progress=True):
"""
"""
# MCMC sampling
if nsteps is not None:
for Ec in self.thresholds:
if not resume:
#print('Beginning sampling for Ec = ' + str(Ec) + ' keV')
time.sleep(1)
self.samplers[Ec].run_mcmc(self.pos0, nsteps, progress=progress)
else:
#print('Resuming sampling for Ec = ' + str(Ec) + ' keV')
time.sleep(1)
self.samplers[Ec].run_mcmc(None, nsteps, progress=progress)
self.samples = {Ec:self.samplers[Ec].get_chain(discard=burn_step, flat=True) for Ec in self.thresholds}
# Remove points from poorly-converged walkers
if remove_outliers:
for Ec in self.thresholds:
self.samples[Ec] = identify_outliers(self.samples[Ec])
# Save the average fit parameters
self.theta_avg = {Ec:np.average(self.samples[Ec], axis=0) for Ec in self.thresholds}
# Get the emissivity profile samples
self.n_samples = n_samples
self.emiss_samples = {Ec:self.get_emiss_samples(self.samples[Ec], Ec=Ec, n_samples=n_samples) for Ec in self.thresholds}
self.emiss_CIs = {Ec:ida.profile_confidence(self.emiss_samples[Ec]) for Ec in self.thresholds}
def get_Te_samples(self, slim=0.7, include=[4.0, 5.0]):
"""
"""
ss = self.ss_set[self.Ec_ref].ravel()
sn = np.argmin(np.abs(ss - slim))
s_vals = self.ss_set[self.Ec_ref].ravel()[:sn]
Te_avg_prof_samples = np.zeros([self.n_samples, sn])
for s_index in range(sn):
ratios = {Ec:self.emiss_samples[Ec][:,s_index]/self.emiss_samples[self.Ec_ref][:,s_index] for Ec in include}
Te_samples = {Ec:self.Te_from_R(ratios[Ec], Ec=Ec) for Ec in include}
Te_avg_prof_samples[:,s_index] = sum([Te_samples[Ec] for Ec in include]) / len(include)
Te_avg_CI = ida.profile_confidence(Te_avg_prof_samples)
return s_vals, Te_avg_prof_samples, Te_avg_CI
# ----------------------------------------------- Emissivity Model -----------------------------------------------
def emiss_model_alpha(self, ss, Xem, alpha, beta):
return (10.**Xem)*(1 - ss**alpha)**beta
def emiss_model(self, *args):
if self.method == 'alpha':
return self.emiss_model_alpha(*args)
else:
raise KeyError('Please select a valid fitting method.')
def get_model(self, theta, Ec=3.0):
gij = self.gij_set[Ec]
ss = self.ss_set[Ec]
emiss = self.emiss_model(ss, *theta)
bright = np.dot(gij, emiss).squeeze()
return self.p3det.etendue[Ec]*bright
# ----------------------------------------------- Bayesian Methods -----------------------------------------------
def ln_prob(self, theta, Ec=3.0):
lp = self.ln_prior(theta)
if np.isfinite(lp):
return lp + self.ln_likelihood(theta, Ec=Ec)
else:
return -np.inf
def ln_likelihood(self, theta, Ec=3.0):
data = self.mesxr_data[Ec][self.indices]
model = self.get_model(theta, Ec=Ec)[self.indices]
return -np.sum(model - data*np.log(model)) + self.ln_data_fact[Ec]
def ln_prior(self, theta):
if self.method == 'alpha':
return self.ln_prior_alpha(*theta)
else:
raise KeyError('Method not recognized.')
def ln_prior_alpha(self, Xem, alpha, beta):
X_min, X_max = self.priors[0]
al_min, al_max = self.priors[1]
bt_min, bt_max = self.priors[2]
if (X_min < Xem < X_max) and (al_min < alpha < al_max) and (bt_min < beta < bt_max):
return 0.0
else:
return - np.inf
def get_pos0(self):
if self.method == 'alpha':
X_min, X_max = self.priors[0]
al_min, al_max = self.priors[1]
bt_min, bt_max = self.priors[2]
pos0 = np.zeros([self.nwalkers, 3])
pos0[:,0] = (X_max - X_min)*np.random.random(size=self.nwalkers) + X_min
pos0[:,1] = (al_max - al_min)*np.random.random(size=self.nwalkers) + al_min
pos0[:,2] = (bt_max - bt_min)*np.random.random(size=self.nwalkers) + bt_min
return pos0
# ----------------------------------------------- Ratio Model -----------------------------------------------
def Te_from_R(self, rs, Ec=4.0):
if Ec > self.Ec_ref:
return np.interp(rs, self.ratios[Ec], self.temps)
else:
# Reverse to avoid interpolation error
return np.interp(rs, np.flip(self.ratios[Ec]), np.flip(self.temps))
def get_en_int(self, Te, Ec):
"""
Model the local emissivity, to a constant factor.
"""
en = np.linspace(1500, 20000, num=1000)
resp = rsp.Pilatus_Response(Ec, self.Ew)
return np.trapz(resp(en)*np.exp(-en/Te)/np.sqrt(Te), x=en)  # assumed completion: integrate the filtered Maxwellian over photon energy
import numpy as np
K = np.array([-10.3, -3.0, 1.0]) # Original triangle
L = np.array([12.0, -4.0, 1.0])
M = np.array([11.0, 10.0, 1.0])
K1 = np.array([ -2.0, -8.0, 1.0]) # Image triangle
L1 = np.array([ -5.0, 4.0, 1.0])
M1 = np.array([ -2.0, 1.0, 1.0])
MA1 = np.matrix([K, L, M])
MA2 = np.matrix([K1, L1, M1])
print(np.linalg.inv(MA1))
print(MA2)
res = np.linalg.inv(MA1) * MA2  # assumed completion: solve MA1 * res = MA2 for the transformation
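# Sanity check (assumed): applying the recovered map to the original triangle
# should reproduce the image triangle up to floating-point error.
print(res)
print(MA1 * res)  # expected to equal MA2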
import numpy as np
def mean_or_nan(xs):
"""Return its mean a non-empty sequence, numpy.nan for a empty one."""
return np.mean(xs) if xs else np.nan
"""
MIT License
Copyright (c) 2020 vqdang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from scipy import ndimage as ndi
from typing import Tuple, Dict
from src.utils import bounding_box, center_crop, remove_small_objects
# ported from
# https://github.com/vqdang/hover_net/blob/195ed9b6cc67b12f908285492796fb5c6c15a000/src/loader/augs.py#L21
def gen_hv_maps(
inst_map: np.ndarray,
crop_shape: Tuple[int]=(256, 256)
) -> Dict[str, np.ndarray]:
"""
Generates horizontal and vertical maps from instance labels
The map is calculated only for instances within the crop portion
but based on the original shape in original image.
Perform following operation:
Obtain the horizontal and vertical distance maps for each
nuclear instance.
Args:
---------
inst_map (np.ndarray):
inst map
crop_shape (Tuple[int]):
crop shape if network output smaller dims than the input
Returns:
---------
Dict[str, np.ndarray]: Dict containing keys "xmap" & "ymap".
"xmap" maps to horizontal gradient map and "ymap" maps to
vertical gradient map of the input mask. Both are of shape:
(H, W)
"""
if inst_map.shape[0] > crop_shape[0]:
inst_map = center_crop(inst_map, crop_shape[0], crop_shape[1])
remove_small_objects(inst_map, min_size=30, out=inst_map)
x_map = np.zeros_like(inst_map, dtype=np.float32)
y_map = np.zeros_like(inst_map, dtype=np.float32)
inst_list = list(np.unique(inst_map))
inst_list.remove(0) # 0 is background
for inst_id in inst_list:
inst = np.array(inst_map == inst_id, np.int32)
y1, y2, x1, x2 = bounding_box(inst)
y1 = y1 - 2 if y1 - 2 >= 0 else y1
x1 = x1 - 2 if x1 - 2 >= 0 else x1
x2 = x2 + 2 if x2 + 2 <= inst_map.shape[1] - 1 else x2
y2 = y2 + 2 if y2 + 2 <= inst_map.shape[0] - 1 else y2
inst = inst[y1:y2, x1:x2]
# instance center of mass, rounded to nearest pixel
inst_com = list(ndi.measurements.center_of_mass(inst))
inst_com[0] = int(inst_com[0] + 0.5)
inst_com[1] = int(inst_com[1] + 0.5)
inst_x_range = np.arange(1, inst.shape[1]+1)
inst_y_range = np.arange(1, inst.shape[0]+1)
# shifting center of pixels grid to instance center of mass
inst_x_range -= inst_com[1]
inst_y_range -= inst_com[0]
inst_x, inst_y = np.meshgrid(inst_x_range, inst_y_range)
# remove coord outside of instance
inst_x[inst == 0] = 0
inst_y[inst == 0] = 0
inst_x = inst_x.astype('float32')
inst_y = inst_y.astype('float32')
# normalize min into -1 scale
if np.min(inst_x) < 0:
inst_x[inst_x < 0] /= (-np.amin(inst_x[inst_x < 0]))
if np.min(inst_y) < 0:
inst_y[inst_y < 0] /= (-np.amin(inst_y[inst_y < 0]))
# text_file = open("../build/example.txt", "r")
# lines = text_file.readlines()
# print lines
# # print len(lines)
# text_file.close()
import math as math
import numpy as np
# import math as math
# import matplotlib
# import matplotlib.cm as cm
# import matplotlib.mlab as mlab
# import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d import Axes3D
# from scipy.constants import pi
# from PyKEP import epoch, DAY2SEC, AU, MU_SUN, lambert_problem, epoch_from_string
# from PyKEP.planet import jpl_lp
# from PyKEP.orbit_plots import plot_planet, plot_lambert, plot_kepler
from numpy import loadtxt
import matplotlib.pyplot as plt
# import numpy as np
mylegend = ['0 revolutions','1 revolution','2 revolutions','3 revolutions','4 revolutions','5 revolutions']
# test1 = [4,3,2,1,0]#0,5,1)
test1 = np.arange(0,5,1)
for j in (test1):
filename = "../data/dv_" + str(j) + ".txt"
# print filename
counter = loadtxt("../data/i.txt", comments="#", delimiter=" ", unpack=False)
dV = loadtxt(filename, comments="#", delimiter=" ", unpack=False)
# for i in np.size(counter):
# np.array(i) = dV[i]
# print dV, np.size(dV)
# print counter, np.size(counter)
dvmin = np.amin(dV)
# print dvmin
total = np.ndarray(shape=(np.size(dV),2), dtype=float, order='F')
# total[1,1] = 5
for i in np.arange(0,np.size(dV)):
total[i,0] = dV[i]
total[i,1] = counter[i]
# itemindex = 1
itemindex = np.where(total[:,0]<100000)
indexs = np.amin(itemindex)
total = np.delete(total, np.arange(0,indexs,1), 0)
plt.plot(total[:,1], total[:,0],label=mylegend[j])
# plt.axis([0, np.size(counter)*0.3/10000, 0, 2500])#np.amax(dV)*1.1])
plt.axis([0, np.size(counter)*0.3/10000, 0, np.amax(dV)*1.1])  # assumed completion, following the commented-out variant above
from osgeo import gdal
import math
import matplotlib.pyplot as plt
import numpy as np
import os
from osgeo import gdal_array
import pandas as pd
import uuid
import warnings
from .pipesegment import PipeSegment, LoadSegment, MergeSegment
class Image:
def __init__(self, data, name='image', metadata={}):
self.name = name
self.metadata = metadata
self.set_data(data)
def set_data(self, data):
if isinstance(data, np.ndarray) and data.ndim == 2:
data = np.expand_dims(data, axis=0)
"""
This implements a shooting trajectory optimization algorithm.
The closest known algorithm is perhaps MPPI and hence we stick to that terminology.
Uses a filtered action sequence to generate smooth motions.
"""
import numpy as np
from trajopt.algos.trajopt_base import Trajectory
from trajopt.utils import gather_paths_parallel
from trajopt.envs.herb_pushing_env import HerbEnv
import trimesh
import cv2
import os
import open3d as o3d
class MPPI(Trajectory):
def __init__(self, run_num, env, top_dir, task, H, paths_per_cpu,
num_cpu=1,
kappa=1.0,
gamma=1.0,
mean=None,
filter_coefs=None,
default_act='repeat',
seed=123
):
self.top_dir=top_dir
self.task=task
self.env, self.seed = env, seed
self.n, self.m = env.observation_dim, env.action_dim
self.H, self.paths_per_cpu, self.num_cpu = H, paths_per_cpu, num_cpu
self.mean, self.filter_coefs, self.kappa, self.gamma = mean, filter_coefs, kappa, gamma
if mean is None:
self.mean = np.zeros(self.m)
if filter_coefs is None:
self.filter_coefs = [np.ones(self.m), 1.0, 0.0, 0.0]
self.default_act = default_act
self.sol_state = []
self.sol_act = []
self.sol_reward = []
self.sol_obs = []
self.act_sequences=[]
self.env.reset_model(seed=self.seed)
self.sol_state.append(self.env.get_env_state().copy())
self.sol_obs.append(self.env._get_obs())
self.act_sequence = np.repeat(np.expand_dims(self.sol_state[-1]['qp'][:15], 0), self.H, axis=0)
#self.act_sequence = np.ones((self.H, self.m)) * self.mean
self.env_pool=[HerbEnv(top_dir, None, run_num, obs=False, task=self.task) for i in range(self.num_cpu)]
def update(self, paths):
num_traj = len(paths)
act = np.array([paths[i]["actions"] for i in range(num_traj)])
vel= np.array([paths[i]["vels"] for i in range(num_traj)])
R = self.score_trajectory(paths)
S = np.exp(self.kappa*(R-np.max(R)))
best_reward=-float('inf')
for i in range(len(paths)):
if paths[i]["rewards"][-1]>best_reward:
best_reward=paths[i]["rewards"][-1]
# blend the action sequence
weighted_seq = S*act.T
act_sequence = np.sum(weighted_seq.T, axis=0)/(np.sum(S) + 1e-6)
weighted_vels = S*vel.T
weighted_vels = np.sum(weighted_vels.T, axis=0)/(np.sum(S) + 1e-6)  # assumed completion, mirroring the action-sequence blend above
import itertools
import cmath
import h5py
from pauxy.systems.hubbard import Hubbard
from pauxy.trial_wavefunction.free_electron import FreeElectron
from pauxy.trial_wavefunction.uhf import UHF
from pauxy.trial_wavefunction.harmonic_oscillator import HarmonicOscillator
from pauxy.estimators.ci import simple_fci_bose_fermi, simple_fci
from pauxy.estimators.hubbard import local_energy_hubbard_holstein, local_energy_hubbard
from pauxy.systems.hubbard_holstein import HubbardHolstein
from pauxy.utils.linalg import reortho
from pauxy.estimators.greens_function import gab_spin
import time
from pauxy.utils.linalg import diagonalise_sorted
from pauxy.estimators.greens_function import gab_spin
import scipy
from scipy.linalg import expm
import scipy.sparse.linalg
from scipy.optimize import minimize
try:
from jax.config import config
config.update("jax_enable_x64", True)
import jax
from jax import grad, jit
import jax.numpy as np
import jax.scipy.linalg as LA
import numpy
except ModuleNotFoundError:
import numpy
np = numpy
def jit(function):
    # Fallback no-op decorator when jax is unavailable: return the function
    # unchanged (the original wrapper returned None, silently breaking every
    # @jit-decorated function).
    return function
import math
@jit
def gab(A, B):
r"""One-particle Green's function.
This actually returns 1-G since it's more useful, i.e.,
.. math::
\langle \phi_A|c_i^{\dagger}c_j|\phi_B\rangle =
[B(A^{\dagger}B)^{-1}A^{\dagger}]_{ji}
where :math:`A,B` are the matrices representing the Slater determinants
:math:`|\psi_{A,B}\rangle`.
For example, usually A would represent (an element of) the trial wavefunction.
.. warning::
Assumes A and B are not orthogonal.
Parameters
----------
A : :class:`numpy.ndarray`
Matrix representation of the bra used to construct G.
B : :class:`numpy.ndarray`
Matrix representation of the ket used to construct G.
Returns
-------
GAB : :class:`numpy.ndarray`
(One minus) the green's function.
"""
# Todo: check energy evaluation at later point, i.e., if this needs to be
# transposed. Shouldn't matter for Hubbard model.
inv_O = np.linalg.inv((A.conj().T).dot(B))
GAB = B.dot(inv_O.dot(A.conj().T))
return GAB
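# Illustrative sketch (hypothetical): for an orthonormal determinant A = B,
# gab() reduces to the projector A (A^H A)^{-1} A^H onto the occupied space,
# which is idempotent.
import numpy
_A = numpy.linalg.qr(numpy.random.rand(6, 3))[0]    # 6 basis functions, 3 occupied
_G = _A.dot(numpy.linalg.inv(_A.conj().T.dot(_A))).dot(_A.conj().T)
assert numpy.allclose(_G.dot(_G), _G)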
@jit
def local_energy_hubbard_holstein_jax(T,U,g,m,w0, G, X, Lap, Ghalf=None):
r"""Calculate local energy of walker for the Hubbard-Hostein model.
Parameters
----------
system : :class:`HubbardHolstein`
System information for the HubbardHolstein model.
G : :class:`numpy.ndarray`
Walker's "Green's function"
Returns
-------
(E_L(phi), T, V): tuple
Local, kinetic and potential energies of given walker phi.
"""
nbasis = T[0].shape[1]
ke = np.sum(T[0] * G[0] + T[1] * G[1])
pe = U * np.dot(G[0].diagonal(), G[1].diagonal())
pe_ph = 0.5 * w0 ** 2 * m * np.sum(X * X)
ke_ph = -0.5 * np.sum(Lap) / m - 0.5 * w0 * nbasis
rho = G[0].diagonal() + G[1].diagonal()
e_eph = - g * np.sqrt(m * w0 * 2.0) * np.dot(rho, X)
etot = ke + pe + pe_ph + ke_ph + e_eph
Eph = ke_ph + pe_ph
Eel = ke + pe
Eeb = e_eph
return (etot, Eel, Eph + Eeb)
def gradient(x, nbasis, nup, ndown, T, U, g, m, w0, c0,restricted,restricted_shift):
grad = numpy.array(jax.grad(objective_function)(x, nbasis, nup, ndown, T, U, g, m, w0, c0,restricted,restricted_shift))
return grad
def hessian(x, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift):
    # restricted_shift was referenced in the body but missing from the signature
    H = numpy.array(jax.hessian(objective_function)(x, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift))
    return H
def hessian_product(x, p, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift):
    # Central-difference Hessian-vector product; restricted/restricted_shift
    # are forwarded to gradient(), which requires them.
    h = 1e-5
    xph = x + p * h
    xmh = x - p * h
    gph = gradient(xph, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift)
    gmh = gradient(xmh, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift)
    Hx = (gph - gmh) / (2.0 * h)
    return Hx
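# Illustrative sanity check (hypothetical): the central-difference rule in
# hessian_product() approximates H @ p from two gradient evaluations. For
# f(x) = 0.5 x^T A x the gradient is A @ x, so the exact product is A @ p.
import numpy
_A = numpy.array([[2.0, 0.5], [0.5, 1.0]])
_grad = lambda v: _A.dot(v)
_x, _p, _h = numpy.ones(2), numpy.array([1.0, -1.0]), 1e-5
_Hp = (_grad(_x + _h * _p) - _grad(_x - _h * _p)) / (2.0 * _h)
assert numpy.allclose(_Hp, _A.dot(_p))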
@jit
def compute_exp(Ua, tmp, theta_a):
for i in range(1,50):
tmp = np.einsum("ij,jk->ik", theta_a, tmp)
Ua += tmp / math.factorial(i)
return Ua
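# Illustrative sketch (hypothetical): compute_exp() sums the Taylor series
# exp(theta) = sum_k theta^k / k!, truncated at k = 49. For a small
# skew-symmetric generator this agrees with scipy.linalg.expm.
import math
import numpy
from scipy.linalg import expm as _scipy_expm
_theta = numpy.array([[0.0, 0.1], [-0.1, 0.0]])
_U, _tmp = numpy.eye(2), numpy.eye(2)
for _i in range(1, 50):
    _tmp = _theta.dot(_tmp)
    _U = _U + _tmp / math.factorial(_i)
assert numpy.allclose(_U, _scipy_expm(_theta))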
def compute_greens_function_from_x (x, nbasis, nup, ndown, c0, restricted):
shift = x[0:nbasis]
nbsf = nbasis
nocca = nup
noccb = ndown
nvira = nbasis - nocca
nvirb = nbasis - noccb
nova = nocca*nvira
novb = noccb*nvirb
daia = np.array(x[nbsf:nbsf+nova],dtype=np.float64)
daib = np.array(x[nbsf+nova:nbsf+nova+novb],dtype=np.float64)
daia = daia.reshape((nvira, nocca))
daib = daib.reshape((nvirb, noccb))
if (restricted):
daib = jax.ops.index_update(daib, jax.ops.index[:,:], daia)
theta_a = np.zeros((nbsf, nbsf),dtype=np.float64)
theta_b = np.zeros((nbsf, nbsf),dtype=np.float64)
theta_a = jax.ops.index_update(theta_a, jax.ops.index[nocca:nbsf,:nocca], daia)
theta_a = jax.ops.index_update(theta_a, jax.ops.index[:nocca, nocca:nbsf], -np.transpose(daia))
theta_b = jax.ops.index_update(theta_b, jax.ops.index[noccb:nbsf,:noccb], daib)
theta_b = jax.ops.index_update(theta_b, jax.ops.index[:noccb, noccb:nbsf], -np.transpose(daib))
Ua = np.eye(nbsf,dtype=np.float64)
tmp = np.eye(nbsf,dtype=np.float64)
Ua = compute_exp(Ua, tmp, theta_a)
C0a = np.array(c0[:nbsf*nbsf].reshape((nbsf,nbsf)),dtype=np.float64)
Ca = C0a.dot(Ua)
Ga = gab(Ca[:,:nocca], Ca[:,:nocca])
if (noccb > 0):
C0b = np.array(c0[nbsf*nbsf:].reshape((nbsf,nbsf)),dtype=np.float64)
Ub = np.eye(nbsf)
tmp = np.eye(nbsf)
Ub = compute_exp(Ub, tmp, theta_b)
Cb = C0b.dot(Ub)
Gb = gab(Cb[:,:noccb], Cb[:,:noccb])
else:
Gb = numpy.zeros_like(Ga)
G = np.array([Ga, Gb],dtype=np.float64)
return G
def objective_function (x, nbasis, nup, ndown, T, U, g, m, w0, c0, restricted, restricted_shift):
nbasis = int(round(nbasis))
nup = int(round(nup))
ndown = int(round(ndown))
shift = x[0:nbasis]
nbsf = nbasis
nocca = nup
noccb = ndown
nvira = nbasis - nocca
nvirb = nbasis - noccb
nova = nocca*nvira
novb = noccb*nvirb
daia = np.array(x[nbsf:nbsf+nova],dtype=np.float64)
daib = np.array(x[nbsf+nova:nbsf+nova+novb],dtype=np.float64)
daia = daia.reshape((nvira, nocca))
daib = daib.reshape((nvirb, noccb))
if (restricted):
daib = jax.ops.index_update(daib, jax.ops.index[:,:], daia)
theta_a = np.zeros((nbsf, nbsf),dtype=np.float64)
theta_b = np.zeros((nbsf, nbsf),dtype=np.float64)
theta_a = jax.ops.index_update(theta_a, jax.ops.index[nocca:nbsf,:nocca], daia)
theta_a = jax.ops.index_update(theta_a, jax.ops.index[:nocca, nocca:nbsf], -np.transpose(daia))
theta_b = jax.ops.index_update(theta_b, jax.ops.index[noccb:nbsf,:noccb], daib)
theta_b = jax.ops.index_update(theta_b, jax.ops.index[:noccb, noccb:nbsf], -np.transpose(daib))
Ua = np.eye(nbsf,dtype=np.float64)
tmp = np.eye(nbsf,dtype=np.float64)
Ua = compute_exp(Ua, tmp, theta_a)
C0a = np.array(c0[:nbsf*nbsf].reshape((nbsf,nbsf)),dtype=np.float64)
Ca = C0a.dot(Ua)
Ga = gab(Ca[:,:nocca], Ca[:,:nocca])
if (noccb > 0):
C0b = np.array(c0[nbsf*nbsf:].reshape((nbsf,nbsf)),dtype=np.float64)
Ub = np.eye(nbsf)
tmp = np.eye(nbsf)
Ub = compute_exp(Ub, tmp, theta_b)
Cb = C0b.dot(Ub)
Gb = gab(Cb[:,:noccb], Cb[:,:noccb])
else:
Gb = np.zeros_like(Ga)
G = np.array([Ga, Gb],dtype=np.float64)
if (restricted_shift):
shift = jax.ops.index_update(shift, jax.ops.index[:nbasis], x[0])
phi = HarmonicOscillator(m, w0, order=0, shift = shift)
Lap = phi.laplacian(shift)
etot, eel, eph = local_energy_hubbard_holstein_jax(T,U, g,m,w0, G, shift, Lap)
return etot.real
class CoherentState(object):
def __init__(self, system, options, verbose=False):
self.verbose = verbose
if verbose:
print ("# Parsing free electron input options.")
init_time = time.time()
self.name = "coherent_state"
self.type = "coherent_state"
self.trial_type = complex
self.initial_wavefunction = options.get('initial_wavefunction',
'coherent_state')
if verbose:
print ("# Diagonalising one-body Hamiltonian.")
(self.eigs_up, self.eigv_up) = diagonalise_sorted(system.T[0])
(self.eigs_dn, self.eigv_dn) = diagonalise_sorted(system.T[1])
self.reference = options.get('reference', None)
self.exporder = options.get('exporder', 6)
self.maxiter = options.get('maxiter', 3)
self.maxscf = options.get('maxscf', 500)
self.ueff = options.get('ueff', system.U)
if verbose:
print("# exporder in CoherentState is 15 no matter what you entered like {}".format(self.exporder))
self.psi = numpy.zeros(shape=(system.nbasis, system.nup+system.ndown),
dtype=self.trial_type)
assert (system.name == "HubbardHolstein")
self.m = system.m
self.w0 = system.w0
self.nbasis = system.nbasis
self.nocca = system.nup
self.noccb = system.ndown
self.algorithm = options.get('algorithm',"bfgs")
self.random_guess = options.get('random_guess',False)
self.symmetrize = options.get('symmetrize',False)
if verbose:
print("# random guess = {}".format(self.random_guess))
if verbose:
print("# Symmetrize Coherent State = {}".format(self.symmetrize))
self.wfn_file = options.get('wfn_file', None)
self.coeffs = None
self.perms = None
if self.wfn_file is not None:
if verbose:
print ("# Reading trial wavefunction from %s"%(self.wfn_file))
f = h5py.File(self.wfn_file, "r")
self.shift = f["shift"][()].real
self.psi = f["psi"][()]
f.close()
if (len(self.psi.shape) == 3):
if verbose:
print("# MultiCoherent trial detected")
self.symmetrize = True
self.perms = None
f = h5py.File(self.wfn_file, "r")
self.coeffs = f["coeffs"][()]
f.close()
self.nperms = self.coeffs.shape[0]
assert(self.nperms == self.psi.shape[0])
assert(self.nperms == self.shift.shape[0])
self.boson_trial = HarmonicOscillator(m = system.m, w = system.w0, order = 0, shift=self.shift[0,:])
self.G = None
if verbose:
print("# A total of {} coherent states are used".format(self.nperms))
else:
gup = gab(self.psi[:, :system.nup],
self.psi[:, :system.nup]).T
if (system.ndown > 0):
gdown = gab(self.psi[:, system.nup:],
self.psi[:, system.nup:]).T
else:
gdown = numpy.zeros_like(gup)
self.G = numpy.array([gup, gdown], dtype=self.psi.dtype)
self.boson_trial = HarmonicOscillator(m = system.m, w = system.w0, order = 0, shift=self.shift)
else:
free_electron = options.get('free_electron', False)
if (free_electron):
trial_elec = FreeElectron(system, trial=options, verbose=self.verbose)
else:
trial_elec = UHF(system, trial=options, verbose=self.verbose)
self.psi[:, :system.nup] = trial_elec.psi[:, :system.nup]
if (system.ndown > 0):
self.psi[:, system.nup:] = trial_elec.psi[:, system.nup:]
Pa = self.psi[:, :system.nup].dot(self.psi[:, :system.nup].T)
Va = (numpy.eye(system.nbasis)  # snippet truncated mid-expression in the source
import argparse
import configparser
import os
import numpy as np
from datetime import datetime, date
from pandas import DataFrame
from coverage import root_dir
import coverage.tools.dataloader as dataloader
from coverage.tools import common_utils
import coverage.tools.model_utils as model_utils
from coverage.tools.coverage_utils import execute_sampling, SurpriseCoverage
def get_aggregated_indices(labels, select_idx):
sampled_indices_list = []
for class_id in select_idx:
sampled_indices = np.nonzero(labels == class_id)[0]
sampled_indices_list.append(sampled_indices)
aggregated_indices = np.concatenate(sampled_indices_list)
return aggregated_indices
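# Illustrative sketch (hypothetical labels): positions are gathered class by
# class, so all indices of the first selected class precede those of the next.
if __name__ == "__main__":
    _labels = np.array([0, 2, 1, 2, 0])
    _idx = get_aggregated_indices(_labels, select_idx=[0, 2])
    assert _idx.tolist() == [0, 4, 1, 3]   # class-0 positions, then class-2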
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--sample_capacity", help="number of images", type=int, default=800)
parser.add_argument("--repeat_times", help="number of selected classes", type=int, default=2)
parser.add_argument("--dataset_network", help="selected class id", type=str, default="cifar100_resnet32")
parser.add_argument("--attack", help="adversarial attack", type=str, default="cw")
parser.add_argument("--exp_date", help="data_of_exp", type=str,)
parser.add_argument("--split_id", help="id number of select split", type=int, default=1)
console_args = parser.parse_args()
print(console_args)
dataset_network = console_args.dataset_network
exp_cfg = configparser.ConfigParser()
coverage_parameters = {"n_bucket": 1000}
exp_cfg.read(f"{root_dir}/config/exp.conf")
total_group_nums = exp_cfg['parameters'].getint("group_nums")
coverage_parameters["kmnc_k_section"] = exp_cfg['parameters'].getint("kmnc_k_section")
coverage_parameters["tknc_k_value"] = exp_cfg['parameters'].getint("tknc_k_value")
coverage_parameters["nc_threshold"] = exp_cfg['parameters'].getfloat("nc_threshold")
coverage_parameters["idc_relevant_neurons"] = exp_cfg['parameters'].getint("idc_relevant_neurons")
rq3_path = exp_cfg['parameters'].get("rq3_path")
sa_dir_name = exp_cfg['parameters'].get("sa_intermediate")
sa_intermedia_path = os.path.join(root_dir, sa_dir_name)
idc_dir_name = exp_cfg['parameters'].get("idc_intermediate")
idc_intermedia_path = os.path.join(root_dir, idc_dir_name)
coverage_parameters["idc_intermedia_path"] = idc_intermedia_path
console_args.exp_date = str(date.today()) if console_args.exp_date is None else console_args.exp_date
dataset_network_dir = os.path.join(root_dir, rq3_path, console_args.exp_date, dataset_network)
common_utils.create_path(sa_intermedia_path, idc_intermedia_path, rq3_path, dataset_network_dir)
dataset_name, network_name = tuple(dataset_network.split("_"))
num_classes = dataloader.class_num(dataset_name)
test_sizes = dataloader.test_sizes[dataset_name]
s0 = datetime.now()
# load model and boundary
classifier = model_utils.load_model(network=network_name, dataset=dataset_name)
boundary = common_utils.load_boundary(dataset_name, network_name)
# direct use `size_per_class` correctly classified images
x_test, y_test = dataloader.load_dataset(dataset_name)
x_test = dataloader.preprocess_dataset(dataset_name, network_name, x_test)
print(f"INFO: {dataset_name, network_name} value range of clean images :[{np.min(x_test)},{np.max(x_test)}]")
# the adversarial inputs are already preprocessed.
adv_x, adv_y = dataloader.load_adversarial_images(dataset_name, network_name, console_args.attack, mode="full")
print(f"INFO: {dataset_name, network_name} value range of adv images :[{np.min(adv_x)},{np.max(adv_x)}]")
# I skip loading train set here. We don't need train-set because we have generated SA and IDC intermediate files
skip_train = True
if skip_train:
x_train = y_train = None
else:
# note that the y_train is not in one-vector format. It's just an array of class ids.
x_train, y_train = dataloader.load_train_set(console_args.dataset)
x_train = dataloader.preprocess_dataset(console_args.dataset, console_args.network, x_train)
print(f"INFO: {console_args.dataset, console_args.network} "
f"value range of train images :[{np.min(x_train)},{np.max(x_train)}]")
print(f"Data & Model preparing time:{datetime.now() - s0}")
sampling_indices = common_utils.sampling_indices_dict(500, dataset_model=dataset_network,
test_size=console_args.sample_capacity)
correct_indices = sampling_indices['pure_correct_indices']
pure_correct_labels = y_test[correct_indices].copy()
# we divide the classes into ten splits
section_num = 10
class_ids = np.arange(num_classes)
section_length = int(num_classes / section_num)
adv_lsa, adv_dsa, adv_mdsa = common_utils.cached_sa(dataset_network=dataset_network,
attack_type=console_args.attack,
test_size=test_sizes)
clean_lsa, clean_dsa, clean_mdsa = common_utils.cached_sa(dataset_network=dataset_network,
attack_type="normal",
test_size=test_sizes)
sa_dict = dict()
sa_dict["clean_lsa"], sa_dict["adv_lsa"] = clean_lsa, adv_lsa
sa_dict["clean_dsa"], sa_dict["adv_dsa"] = clean_dsa, adv_dsa
sa_dict["clean_mdsa"], sa_dict["adv_mdsa"] = clean_mdsa, adv_mdsa
sa_dict["lsa_boundary"] = SurpriseCoverage.filter_outliers("LSA",np.concatenate([clean_lsa,adv_lsa]).copy())
sa_dict["dsa_boundary"] = SurpriseCoverage.filter_outliers("DSA",np.concatenate([clean_dsa,adv_dsa]).copy())
sa_dict["mdsa_boundary"] = SurpriseCoverage.filter_outliers("MDSA",np.concatenate([clean_mdsa,adv_mdsa]).copy())
start_class_id = int(section_length * console_args.split_id)
top_idx = class_ids[start_class_id:start_class_id + section_length]
print(f"Selecting spilt:{console_args.split_id},classes:{top_idx}")
df_titles = ["Sampling_Name", "correct_proportion", "NC", "NBC", "SNAC", "TKNC", 'KMNC', "LSC", "DSC", "MDSC",
"IDC", "error_rate"]
df_path = os.path.join(dataset_network_dir,
f"{console_args.dataset_network}_{console_args.attack}_size{console_args.sample_capacity}"
f"_class_ratio-split{console_args.split_id}.xlsx")
df = DataFrame(columns=df_titles)
row_id = 0
_aggregated_correct_idx = get_aggregated_indices(pure_correct_labels, top_idx)
aggregated_correct_idx = correct_indices[_aggregated_correct_idx]
aggregated_wrong_idx = get_aggregated_indices(adv_y, top_idx)
s0 = datetime.now()
for rid in range(console_args.repeat_times):
if len(aggregated_correct_idx) >= console_args.sample_capacity:
adv_minimum = 0
else:
adv_minimum = console_args.sample_capacity - len(aggregated_correct_idx)
adv_maximum = int(console_args.sample_capacity * 0.7)
assert adv_maximum > adv_minimum, f"Maximum {adv_maximum} <= Minimum {adv_minimum}. " \
f"Only {len(aggregated_correct_idx)} correct inputs are found."
wrong_num = np.random.randint(low=adv_minimum, high=adv_maximum + 1)
correct_num = console_args.sample_capacity - wrong_num
print(f"Repeat times: {rid} of {console_args.repeat_times}, correct: {correct_num}, wrong: {wrong_num}")
select_correct_idx = np.random.choice(a=aggregated_correct_idx, size=correct_num, replace=False)
select_wrong_idx = np.random.choice(a=aggregated_wrong_idx, size=wrong_num, replace=False)
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial
from .complexity_embedding import complexity_embedding
from .fractal_correlation import fractal_correlation
def complexity_dimension(signal, delay=1, dimension_max=20, method="afnn", show=False, **kwargs):
"""Automated selection of the optimal Dimension (m) for time-delay embedding.
From this
`thread <https://www.researchgate.net/post/How-can-we-find-out-which-value-of-embedding-dimensions-is-more-accurate>`_:
"In the early days, the method of choice was to calculate the correlation dimension in various embeddings and
look for a saturation in its value as the embedding dimension increases. However, a saturation will always occur
when you no longer have enough data to adequately fill your high-dimensional space. More recently the method of choice
has been false nearest neighbors, although that suffers from the same problem when the neighborhood does not contain
sufficiently many points. As a rule of thumb, you might demand that each dimension have at least ten points."
Parameters
----------
signal : Union[list, np.array, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
delay : int
Time delay (often denoted 'Tau', sometimes referred to as 'lag').
In practice, it is common to have a fixed time lag (corresponding for instance to the
sampling rate; Gautama, 2003), or to find a suitable value using some algorithmic heuristics
(see ``complexity_delay()``).
dimension_max : int
The maximum embedding dimension (often denoted 'm' or 'd', sometimes referred to as 'order')
to test.
method : str
Method can either be 'afnn' (average false nearest neighbour), 'fnn' (false nearest neighbour),
or 'correlation' (correlation dimension).
show : bool
Visualize the result.
**kwargs
Other arguments, such as ``R=10.0``, ``A=2.0`` (relative and absolute tolerance, only for 'fnn' method).
Returns
-------
delay : int
Optimal dimension.
parameters : dict
A dictionary containing additional information regarding the parameters used
to compute the optimal dimension.
See Also
------------
complexity_delay, complexity_embedding
Examples
---------
>>> import neurokit2 as nk
>>>
>>> # Artifical example
>>> signal = nk.signal_simulate(duration=10, frequency=1, noise=0.01)
>>> # Find optimal delay
>>> delay, parameters = nk.complexity_delay(signal, delay_max=500)
>>>
>>> # Find optimal dimension
>>> optimal_dimension, info = nk.complexity_dimension(signal, delay=delay, dimension_max=20, method='afnn', show=True)
>>> optimal_dimension, info = nk.complexity_dimension(signal, delay=delay, dimension_max=20, method='fnn', show=True)
References
-----------
- <NAME>. (1997). Practical method for determining the minimum embedding dimension of a scalar
time series. Physica D: Nonlinear Phenomena, 110(1-2), 43-50.
"""
# Initialize vectors
if isinstance(dimension_max, int):
dimension_seq = np.arange(1, dimension_max + 1)
else:
dimension_seq = np.array(dimension_max)
# Method
method = method.lower()
if method in ["afnn"]:
# Append value (as it gets cropped afterwards anyway)
dimension_seq = np.append(dimension_seq, [dimension_seq[-1] + 1])
E, Es = _embedding_dimension_afn(signal, dimension_seq=dimension_seq, delay=delay, **kwargs)
E1 = E[1:] / E[:-1]
E2 = Es[1:] / Es[:-1]
# To find where E1 saturates, set a threshold of difference
# threshold = 0.1 * (np.max(E1) - np.min(E1))
min_dimension = [i for i, x in enumerate(E1 >= 0.85 * np.nanmax(E1)) if x][0] + 1
# To standardize the length of dimension_seq with E1 and E2
dimension_seq = dimension_seq[:-1]
# Store information
info = {"Method": method, "Values": dimension_seq, "E1": E1, "E2": E2}
if show is True:
_embedding_dimension_plot(
method=method,
dimension_seq=dimension_seq,
min_dimension=min_dimension,
E1=E1,
E2=E2,
)
elif method in ["fnn"]:
f1, f2, f3 = _embedding_dimension_ffn(
signal, dimension_seq=dimension_seq, delay=delay, **kwargs
)
min_dimension = [i for i, x in enumerate(f3 <= 1.85 * np.min(f3[np.nonzero(f3)])) if x][0]
# Store information
info = {"Method": method, "Values": dimension_seq, "f1": f1, "f2": f2, "f3": f3}
if show is True:
_embedding_dimension_plot(
method=method,
dimension_seq=dimension_seq,
min_dimension=min_dimension,
f1=f1,
f2=f2,
f3=f3,
)
elif method in ["correlation", "cd"]:
CDs = _embedding_dimension_correlation(signal, dimension_seq, delay=delay, **kwargs)
# Find elbow (TODO: replace by better method of elbow localization)
min_dimension = dimension_seq[np.where(CDs >= 0.66 * np.max(CDs))[0][0]]
# Store information
info = {"Method": method, "Values": dimension_seq, "CD": CDs}
if show is True:
_embedding_dimension_plot(
method=method,
dimension_seq=dimension_seq,
min_dimension=min_dimension,
CD=CDs,
)
else:
raise ValueError("NeuroKit error: complexity_dimension(): 'method' not recognized.")
return min_dimension, info
# =============================================================================
# Methods
# =============================================================================
def _embedding_dimension_correlation(signal, dimension_seq, delay=1, **kwargs):
"""Return the Correlation Dimension (CD) for a all d in dimension_seq."""
CDs = np.zeros(len(dimension_seq))
for i, d in enumerate(dimension_seq):
CDs[i] = fractal_correlation(signal, dimension=d, delay=delay, **kwargs)[0]
return CDs
def _embedding_dimension_afn(signal, dimension_seq, delay=1, **kwargs):
"""Return E(d) and E^*(d) for a all d in dimension_seq.
E(d) and E^*(d) will be used to calculate E1(d) and E2(d).
E1(d) = E(d + 1)/E(d). E1(d) stops changing when d is greater than some value d0 if the time
series comes from an attractor. Then d0 + 1 is the minimum embedding dimension we look for.
E2(d) = E*(d + 1)/E*(d). E2(d) is a useful quantity to distinguish deterministic signals from
stochastic signals. For random data, since the future values are independent of the past values,
E2(d) will be equal to 1 for any d. For deterministic data, E2(d) is certainly related to d, it
cannot be a constant for all d; there must exist some d's such that E2(d) is not 1.
"""
values = np.asarray(
[
_embedding_dimension_afn_d(signal, dimension, delay, **kwargs)
for dimension in dimension_seq
]
).T
E, Es = values[0, :], values[1, :]
return E, Es
def _embedding_dimension_afn_d(
signal, dimension, delay=1, metric="chebyshev", window=10, maxnum=None, **kwargs
):
"""Return E(d) and E^*(d) for a single d.
Returns E(d) and E^*(d) for the AFN method for a single d.
"""
d, dist, index, y2 = _embedding_dimension_d(signal, dimension, delay, metric, window, maxnum)
# Compute the ratio of near-neighbor distances in d + 1 over d dimension
# Its average is E(d)
if any(d == 0) or any(dist == 0):
E = np.nan
Es = np.nan
else:
E = np.mean(d / dist)
# Calculate E^*(d)
Es = np.mean(np.abs(y2[:, -1] - y2[index, -1]))
return E, Es
def _embedding_dimension_ffn(signal, dimension_seq, delay=1, R=10.0, A=2.0, **kwargs):
"""Compute the fraction of false nearest neighbors.
The false nearest neighbors (FNN) method described by Kennel et al.
(1992) to calculate the minimum embedding dimension required to embed a scalar time series.
Returns 3 vectors:
- f1 : Fraction of neighbors classified as false by Test I.
- f2 : Fraction of neighbors classified as false by Test II.
- f3 : Fraction of neighbors classified as false by either Test I or Test II.
"""
values = np.asarray(
[
_embedding_dimension_ffn_d(signal, dimension, delay, R=R, A=A, **kwargs)
for dimension in dimension_seq
]
).T
f1, f2, f3 = values[0, :], values[1, :], values[2, :]
return f1, f2, f3
def _embedding_dimension_ffn_d(
signal, dimension, delay=1, R=10.0, A=2.0, metric="euclidean", window=10, maxnum=None
):
"""Return fraction of false nearest neighbors for a single d."""
d, dist, index, y2 = _embedding_dimension_d(signal, dimension, delay, metric, window, maxnum)
# Find all potential false neighbors using Kennel et al.'s tests.
dist[dist == 0] = np.nan # assign nan to avoid divide by zero error in next line
f1 = np.abs(y2[:, -1] - y2[index, -1]) / dist > R
f2 = d / np.std(signal) > A
f3 = f1 | f2
return np.mean(f1), np.mean(f2), np.mean(f3)
# =============================================================================
# Internals
# =============================================================================
def _embedding_dimension_d(signal, dimension, delay=1, metric="chebyshev", window=10, maxnum=None):
# We need to reduce the number of points in dimension d by tau
# so that after reconstruction, there'll be equal number of points
# at both dimension d as well as dimension d + 1.
y1 = complexity_embedding(signal[:-delay], delay=delay, dimension=dimension)
y2 = complexity_embedding(signal, delay=delay, dimension=dimension + 1)
# Find near neighbors in dimension d.
index, dist = _embedding_dimension_neighbors(y1, metric=metric, window=window, maxnum=maxnum)
# Compute the near-neighbor distances in d + 1 dimension
# TODO: is there a way to make this faster?
d = [scipy.spatial.distance.chebyshev(i, j) for i, j in zip(y2, y2[index])]
return np.asarray(d), dist, index, y2
def _embedding_dimension_neighbors(y, metric="chebyshev", window=0, maxnum=None, show=False):
"""Find nearest neighbors of all points in the given array. Finds the nearest neighbors of all points in the
given array using SciPy's KDTree search.
Parameters
----------
y : ndarray
embedded signal: N-dimensional array containing time-delayed vectors.
metric : str
Metric to use for distance computation. Must be one of "cityblock" (aka the Manhattan metric),
"chebyshev" (aka the maximum norm metric), or "euclidean". Defaults to 'chebyshev'.
window : int
Minimum temporal separation (Theiler window) that should exist between near neighbors.
This is crucial while computing Lyapunov exponents and the correlation dimension. Defaults to 0.
maxnum : int
Maximum number of near neighbors that should be found for each point.
In rare cases, when there are no neighbors that are at a nonzero distance, this will have to
be increased (i.e., beyond 2 * window + 3). Defaults to None (optimum).
show : bool
Defaults to False.
Returns
-------
index : array
Array containing indices of near neighbors.
dist : array
Array containing near neighbor distances.
"""
if metric == "chebyshev":
p = np.inf
elif metric == "cityblock":
p = 1
elif metric == "euclidean":
p = 2
else:
raise ValueError(
'Unknown metric. Should be one of "cityblock", ' '"euclidean", or "chebyshev".'
)
tree = scipy.spatial.cKDTree(y) # pylint: disable=E1102
n = len(y)
if not maxnum:
maxnum = (window + 1) + 1 + (window + 1)
else:
maxnum = max(1, maxnum)
if maxnum >= n:
raise ValueError("maxnum is bigger than array length.")
# Query for k numbers of nearest neighbors
distances, indices = tree.query(y, k=range(1, maxnum + 2), p=p)
# Subtract each point's own index to get the temporal offset to its neighbours
valid = indices - np.tile(np.arange(n), (indices.shape[1], 1)).T
# Remove points that are closer than min temporal separation
valid = np.abs(valid) > window
# Remove also self reference (d > 0)
valid = valid & (distances > 0)
# Get indices to keep
valid = (np.arange(len(distances)), np.argmax(valid, axis=1))  # snippet truncated here in the source
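# Illustrative sketch (hypothetical data, plain SciPy/NumPy): querying nearest
# neighbours and rejecting temporally close points with a Theiler window, as
# _embedding_dimension_neighbors() does above (k=24 mirrors the default
# maxnum = 2 * window + 3 plus one extra neighbour).
import numpy as np
import scipy.spatial

_y = np.random.rand(200, 3)                         # embedded vectors
_dist, _idx = scipy.spatial.cKDTree(_y).query(_y, k=24, p=np.inf)
_window = 10
_ok = (np.abs(_idx - np.arange(len(_y))[:, None]) > _window) & (_dist > 0)
_first = np.argmax(_ok, axis=1)                     # first admissible neighbour
_nn_index = _idx[np.arange(len(_y)), _first]
_nn_dist = _dist[np.arange(len(_y)), _first]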
# --------------------------------------------------------
# Multi-Epitope-Ligand Cartography (MELC) phase-contrast image based segmentation pipeline
#
#
# Written by <NAME>
# --------------------------------------------------------
# Official libraries
import numpy as np
import pandas as pd
import cv2
import tifffile as tiff
import json
import scipy.signal as signal
from numpy.random import randint
from scipy.signal import medfilt2d
from os.path import join, isfile, exists
from copy import deepcopy
# My libraries and packages
import MELC.utils.Files as myF
import MELC.Client.Registration as myReg
import os
if os.name == 'nt':
from configWin import *
else:
from config import *
from MELC.DLManager.Augmenter import CellObjectAugmenter, ImageAugmenter
from MELC.Client.Annotation import SVGAnnot
from numpy.random import randint, uniform
#visualisation
class MELCDataset:
"""
A class representing one Run of MELC Dataset in raw format.
Class for loading MELC Run data saved in the native raw format.
It browses relevant folders, decodes filenames, reads data, registers images and subtracts background.
Automatically performs image registration.
Class stores META data for the MELC Run including the registration indexes.
If loading next time, class reads META data, at first, and verifies them.
If everything is fine, the class uses those META, instead compute evertyhing from the beginning.
When loading fluorescence images, the class takes care about correct light field and background correction
and also performes median filtration to prevent impulse noise present in most of the fluorescent images.
Subtracted and filtered images are simultaneously saved into META data folder.
If all META data are matching raw files, in the next class initialization, saved META images are used
insted of the all computation again to save time.
...
Attributes
----------
PATH_DATA : str
Path to the MELC Run raw data folder containing source and bleach folder with *.png files.
Defined during initialization, by input parameter.
PATH_META : str
Path to the META folder
PATH_META_IMG : int
Path to the folder, where META images are stored. Generated automatically.
FILE_META_RUN : str
Path to the file, where META data about Run are stored. Generated automatically
files_pd : pandas.DataFrame
pandas.DataFrame containing META data about image files in the Run.
Keys : 'path', 'fid', 'step_index', 'filter', 'integration_time', 'antibody',
'modality', 'type', 'registration'
calib_pd : pandas.DataFrame
pandas.DataFrame containing META data about calibration (light and dark field) images.
Keys : 'integration_time', 'calib_type', 'filter', 'path'
antibodies : numpy.array
Array of all present antibodies in the Run
border_min_y : int
Field of view for all registred images. It is smaller, than actual size of the image
because of the various shift caused by moving of the field of view.
border_max_y : int
Field of view for all registred images. It is smaller, than actual size of the image
because of the various shift caused by moving of the field of view.
border_min_x : int
Field of view for all registred images. It is smaller, than actual size of the image
because of the various shift caused by moving of the field of view.
border_max_x : int
Field of view for all registred images. It is smaller, than actual size of the image
because of the various shift caused by moving of the field of view.
Methods
-------
__init__(data_path)
Initializes class object and calls all methods for META data reading, validation, or creation.
__len__()
Returns the number of antibodies present in Run
__META_create(data_path)
Browses the given folder and creates META data for all *.png files.
Decodes names and stores all information into the self.files_pd variable.
The output contains calibration images as well.
__META_write()
Writes META data into *.csv file to the position defined by FILE_META_RUN variable.
__META_read()
Reads *.csv file from FILE_META_RUN filepath. And converts registration indexes from string to int.
__META_verify()
Checks if all files from META data exist.
__META_remove_all()
Removes meta for current run with all files and subfolders
__register_dataset()
Performs image co-registration. Two registration indexes are added to the files_pd.
The coefficients are then translated into crop indexes in the get_image_META() function.
__init_calibrations()
Initialize calib_pd ready to use. Performs further decoding of the calibration file filenames.
get_image_META(index)
index: int
Returns Dictionary with metadata for all files relating to the antibody with *index*.
Metadata for fluorescence and background fluorescence with corresponding phase contrast images are provided.
get_subtracted(index)
index: int
Returns the phase contrast image, fluorescence image and metadata of the antibody indexed by the input variable.
Background image and light field artifacts are already corrected for fluorescence.
get_average_phase()
Average phase image of registered phase contrast images from source (only fluorescence image corresponding phase
contrast images, not bleaching) folder of original raw data.
"""
def __init__(self, data_path):
"""
Initializes MELC Run class object given by filepath data_path and calls all methods for META data reading, validation, or creation.
:param: data_path: str
"""
# generating paths
self.PATH_DATA = data_path
self.PATH_META = join(PATH_DATA_META, 'MELC_data', data_path.split(SEPARATOR)[-1])
self.PATH_META_IMG = join(PATH_DATA_META, 'MELC_data', data_path.split(SEPARATOR)[-1], 'imgs')
self.FILE_META_RUN = join(self.PATH_META, data_path.split(SEPARATOR)[-1] + '.csv')
# check if self.FILE_META_RUN exists. if not, create neccesary folders and create metadata
# do registration and save them
if not isfile(self.FILE_META_RUN):
if not exists(self.PATH_META):
if not exists(join(PATH_DATA_META, 'MELC_data')):
myF.create_folder(join(PATH_DATA_META, 'MELC_data'))
myF.create_folder(self.PATH_META)
self.files_pd = self.__META_create(data_path)
self.__register_dataset()
self.__META_write()
# if yes, then read them and validate. If files are NOT the same as metadata, create new metadata.
# remove all old metadata and save the new ones
else:
try:
self.__META_read()
except:
try: myF.remove_folder(data_path)
except: pass
self.__META_create(data_path)
self.__register_dataset()
self.__META_write()
if not self.__META_verify():
try: myF.remove_folder(data_path)
except: pass
self.__META_create(data_path)
self.__register_dataset()
self.__META_write()
if not exists(self.PATH_META_IMG): # checks the if folder for saving images exists, if not -> create
myF.create_folder(self.PATH_META_IMG)
# separate DataFrame for antibody images and calibration images
self.calib_pd = deepcopy(self.files_pd.loc[self.files_pd['step_index'] == 0].reset_index(drop='True'))
self.files_pd = deepcopy(self.files_pd.loc[self.files_pd['step_index'] > 0].reset_index(drop='True'))
self.__init_calibrations() # transform calibration dataframe to ready-to-use form
# stores the antibodies of dataset into the var
# self.antibody_indexes = np.array()
# now I can ask for length of antibody array
# checks all antibodies, which are not NONE or PBS
# the aim is to exclude PBS files which can appear in the middle of the run
self.antibody_indexes = np.array([])
for idx in np.unique(self.files_pd['step_index']):
temp = np.unique(self.files_pd.loc[self.files_pd['step_index'] == \
np.unique(self.files_pd['step_index'])[idx - 1]]['antibody'])
for staining in temp:
if not (staining == 'PBS' or staining == 'NONE'):
self.antibody_indexes = np.append(self.antibody_indexes, idx)
self.antibodies = np.empty(self.__len__(), dtype="<U20") # array allocation
for k in range(0, self.__len__()): # takes antibody notation by its step_number
temp = np.unique(self.files_pd.loc[self.files_pd['step_index'] == self.antibody_indexes[k]]['antibody'])
self.antibodies[k] = temp[(temp!='PBS') & (temp!='NONE')][0]
# takes the antibody of given step. there is k+2 because calibration (0) and PBS (1) do not count as an antibody
# Decrease the size of field of view for run based on registration coeficients
self.border_min_y = 0
self.border_max_y = 0
self.border_min_x = 0
self.border_max_x = 0
for row in self.files_pd.iterrows():
temp = row[1]['registration']
if temp[0] < self.border_min_y:
self.border_min_y = temp[0]
elif temp[0] > self.border_max_y:
self.border_max_y = temp[0]
if temp[1] < self.border_min_x:
self.border_min_x = temp[1]
elif temp[1] > self.border_max_x:
self.border_max_x = temp[1]
def __get_MELCDataset_len__(self):
"""
Returns value of the number of valid antibodies in the run.
:return length: int
"""
return len(self.antibody_indexes)
def __len__(self):
"""
Returns value of the number of valid antibodies in the run.
:return length: int
"""
return self.__get_MELCDataset_len__()
def __META_create(self, data_path):
"""
Browses the given folder and creates META data for all *.png files.
Returns a pandas.DataFrame with decoded names and pointers.
The output contains the calibration images as well.
:param data_path: str
:return files_pd: pandas.DataFrame
"""
files_png = myF.get_files(data_path, ('png', 'PNG'))
files_pd = pd.DataFrame(files_png)
def get_FID(x):
"""
separates file id from the MELC Run path
:param x: pandas.DataFrame (row)
:return fid: str
"""
temp = x['path'].split(SEPARATOR)
return temp[len(temp) - 1][0:-4]
def get_order_index(x):
"""
extracts index at which step (antibody) the image was acquired from data path
:param x: pandas.DataFrame (row)
:return step_index: str
"""
return int(x['path'].split(SEPARATOR)[-1].split('_')[-1][0:-4])
def get_filter(x):
"""
extracts the filter used during this image acquisition
:param x: pandas.DataFrame (row)
:return filter: str
"""
temp = x['path'].split(SEPARATOR)[-1].split('_')
if len(temp) > 3:
return temp[-2]
else:
return ''
def get_integration_time(x):
"""
extracts the time integration time of given image
:param x: pandas.DataFrame (row)
:return integration_time: str
"""
temp = x['path'].split(SEPARATOR)[-1].split('_')
if len(temp) > 3:
return int(temp[-3])
else:
return 0
def get_antibody(x):
"""
extracts antibody of given image
:param x: pandas.DataFrame (row)
:return antibody: str
"""
return x['fid'].split('_')[1]
def get_modality(x):
"""
extracts whether the image is a phase-contrast or a fluorescence image
:param x: pandas.DataFrame (row)
:return modality: str
"""
temp = x['fid'].split('_')[0]
if temp == 'o' or temp == 'b':
return 'fluo'
elif temp == 'p' or temp == 'pb':
return 'phase'
else:
return ''
def get_image_type(x):
"""
extracts whether the image was acquired during the source or the bleaching phase of the cycle
:param x: pandas.DataFrame (row)
:return image_phase: str
"""
temp = x['path'].split(SEPARATOR)[-2]
if temp == 'source':
return temp
elif temp == 'bleach':
return temp
else:
return ''
# extract all neccesary information from the filenames and insert them into the DataFrame
files_pd = files_pd.rename(columns={0: "path"})
files_pd['fid'] = files_pd.apply(lambda x: get_FID(x), axis=1)
files_pd['step_index'] = files_pd.apply(lambda x: get_order_index(x), axis=1)
files_pd['filter'] = files_pd.apply(lambda x: get_filter(x), axis=1)
files_pd['integration_time'] = files_pd.apply(lambda x: get_integration_time(x), axis=1)
files_pd['antibody'] = files_pd.apply(lambda x: get_antibody(x), axis=1)
files_pd['modality'] = files_pd.apply(lambda x: get_modality(x), axis=1)
files_pd['type'] = files_pd.apply(lambda x: get_image_type(x), axis=1)
files_pd = files_pd.loc[files_pd['integration_time'] > 0].reset_index(drop='True')
return files_pd
def __META_write(self):
"""
Writes META data into the file given by self.FILE_META_RUN variable
:return:
"""
self.files_pd.to_csv(self.FILE_META_RUN)
def __META_read(self):
"""
Reads METADATA from the file given by self.FILE_META_RUN variable.
:return:
"""
def convert_registration_to_int(x):
"""
Converts string array like '[ 0, -1]' into a numpy integer array. This solves the problem,
that indexes are string array after reading META csv file.
:param x:
:return:
"""
temp = np.array(x['registration'][1:-1].split(' '))
temp_array = np.array([], dtype=temp.dtype)
for ele in temp:
if len(ele) > 0: temp_array = np.append(temp_array, ele)
return temp_array.astype(np.int16)
self.files_pd = pd.read_csv(self.FILE_META_RUN)
self.files_pd = self.files_pd.drop(columns=self.files_pd.keys()[0]) # read_csv includes indexes as an column. Here is removed
self.files_pd['registration'] = self.files_pd.apply(lambda x: convert_registration_to_int(x), axis=1)
# converts registration from string like '[ 0, -1]' into int numpy array
def __META_verify(self):
"""
Checks if all files from META data exist. Returns True or False method.
:return is_it_fine: bool
"""
if len(self.files_pd) == len(self.__META_create(self.PATH_DATA)):
for file in self.files_pd['path']:
if not isfile(file):
return False
return True
else:
return False
def __META_remove_all(self):
"""
Removes all meta folder with all files and subfolders
:return:
"""
myF.remove_folder(self.PATH_META)
def __register_dataset(self):
"""
Performs image co-registration. Two registration indexes are added to files_pd. Registers all images
to the first phase-contrast source image.
The registration coefficients are then translated into crop indexes in the get_image_META() function.
:return:
"""
def do_register(ref_img, x):
"""
reads image given by path in DataFrame (row) and calls registration functions from MELC.Client.Registration
:param ref_img: numpy.array
:param x: pandas.DataFrame (row)
:return registration_index: numpy.array of int indexes
"""
if x['modality'] == 'phase': # registers only phase contrast images
temp = x['path']
img = cv2.imread(temp, 2).astype(np.float32)
img = myReg.get_diff(img) # cost function is computed from image representation, estimated using this func
reg_idx, heatmap = myReg.register_images(ref_img, img,
(100, 100),
(750, 750), (26, 26))
# registers img against the reference image ref_img using a (100, 100) px window at position (750, 750) px
# and scans a raster of maximum shifts of size (26, 26) px
reg_idx = np.array([reg_idx[0], reg_idx[1]]).astype(np.int16)
return reg_idx
else: return np.array([0, 0], dtype = np.int16)
reference = deepcopy(self.files_pd.loc[self.files_pd['modality'] == 'phase']).reset_index(drop='True').iloc[0]
ref_img = cv2.imread(reference['path'], 2).astype(np.float32)
ref_img = myReg.get_diff(ref_img)
print('Registration ' + self.PATH_DATA.split(SEPARATOR)[-1])
self.files_pd['registration'] = self.files_pd.apply(lambda x: do_register(ref_img, x), axis=1)
for index, row in self.files_pd.iterrows():
if row['modality'] == 'fluo' and row['step_index'] > 0:
temp = self.files_pd.loc[
(self.files_pd['step_index'] == row['step_index']) &
(self.files_pd['modality'] == 'phase') &
(self.files_pd['type'] == row['type'])
]
row['registration'][0] = temp['registration'][temp.index[0]][0]
row['registration'][1] = temp['registration'][temp.index[0]][1]
def __init_calibrations(self):
"""
Initialize calib_pd ready to use. Performs further decoding of the calibration file filenames.
Transforms calib_pd DataFrame to another with relevant details for image correction, like integration time,
dark or bright field and so on.
:return:
"""
def get_calib_type(x):
"""
Returns b or d, depends on the file id. Represents bright and dark field.
"""
return x['fid'].split('_')[2][0]
self.calib_pd['calib_type'] = self.calib_pd.apply(lambda x: get_calib_type(x), axis=1)
calib = np.unique(self.calib_pd['calib_type'])
int_time = np.unique(self.calib_pd['integration_time'])
filt = np.unique(self.calib_pd['filter'])
calib_pd = {
'integration_time': [],
'calib_type': [],
'filter': [],
'path': []
}
to_erase = np.array([])
cntr = 0
for cal in calib:
for int_t in int_time:
for f in filt:
calib_pd['calib_type'].append(cal)
calib_pd['integration_time'].append(int_t)
calib_pd['filter'].append(f)
files = deepcopy(self.calib_pd.loc[
(self.calib_pd['calib_type'] == cal) &
(self.calib_pd['integration_time'] == int_t) &
(self.calib_pd['filter'] == f)
]).reset_index(drop='True')
if len(files) > 0:
img_path = files['path'][0]
#img = cv2.imread(files['path'][0], 2).astype(np.float32)
#for k in range(1, len(files)):
# img = img + cv2.imread(files['path'][k], 2).astype(np.float32)
#img = img / len(files)
else:
img_path = ''
#img_path = np.array([])
to_erase = np.append(to_erase, cntr)
cntr += 1
#calib_pd['image'].append(img)
calib_pd['path'].append(img_path)
calib_pd = pd.DataFrame(calib_pd)
calib_pd = calib_pd.drop(to_erase)
self.calib_pd = calib_pd
def get_image_META(self, index):
"""
Returns Dictionary with metadata for all files relating to the antibody with *index*.
Metadata for fluorescence and background fluorescence with corresponding phase contrast images are provided.
:param index: int
:return:
"""
def consistence_check_bleach():
pass_coef = True
emsg = 'Following run is inconsistent: ' + self.PATH_DATA.split(SEPARATOR)[-1]
if len(bleach_fluo) == 0:
print('======================================')
print(emsg)
print('')
print('File: ')
print('Index:' + str(index))
print('Step file index: ' + str(self.antibody_indexes[index]))
print('Antibody: ' + source_fluo['antibody'])
print('Filter: ' + source_fluo['filter'])
print('Integration time: ' + str(source_fluo['integration_time']))
print('Bleaching-fluorescence contrast file missing')
print('======================================')
pass_coef = False
if len(bleach_phase) == 0:
print('======================================')
print(emsg)
print('')
print('File')
print('Index:' + str(index))
print('Step file index: ' + str(self.antibody_indexes[index]))
print('Antibody: ' + source_fluo['antibody'])
print('Bleaching-phase contrast file missing')
print('======================================')
pass_coef = False
return pass_coef
def consistence_check_source():
pass_coef = True
emsg = 'Following run is inconsistent: ' + self.PATH_DATA.split(SEPARATOR)[-1]
if len(source_fluo) == 0:
print(emsg)
print('File with index/antibody: ' + str(index) + 'does not exist')
pass_coef = False
if len(source_phase) == 0:
print(emsg)
print('Following file')
print('Index:' + str(index) + ' Antibody: ' + source_fluo['antibody'])
print('Misses file source-phase-contrast file with')
pass_coef = False
return pass_coef
if index >= self.__get_MELCDataset_len__(): return False
source_fluo = self.files_pd.loc[
(self.files_pd['step_index'] == self.antibody_indexes[index]) &
(self.files_pd['modality'] == 'fluo') &
(self.files_pd['type'] == 'source') &
((self.files_pd['antibody'] != 'NONE'))
]
if len(source_fluo) == 1: source_fluo = source_fluo.iloc[0]
else:
    raise Exception('Expected exactly one source-fluorescence record for index ' + str(index))
bleach_fluo = self.files_pd.loc[
(self.files_pd['step_index'] == self.antibody_indexes[index]-1) &
(self.files_pd['modality'] == 'fluo') &
(self.files_pd['type'] == 'bleach') &
(self.files_pd['filter'] == source_fluo['filter']) &
(self.files_pd['integration_time'] == source_fluo['integration_time'])
]
if len(bleach_fluo) == 1: bleach_fluo = bleach_fluo.iloc[0]
else: bleach_fluo = []
source_phase = self.files_pd.loc[
(self.files_pd['step_index'] == self.antibody_indexes[index]) &
(self.files_pd['modality'] == 'phase') &
(self.files_pd['type'] == 'source')
]
if len(source_phase) == 1: source_phase = source_phase.iloc[0]
else: source_phase = []
bleach_phase = self.files_pd.loc[
(self.files_pd['step_index'] == self.antibody_indexes[index]-1) &
(self.files_pd['modality'] == 'phase') &
(self.files_pd['type'] == 'bleach')
]
if len(bleach_phase) == 1: bleach_phase = bleach_phase.iloc[0]
else: bleach_phase = []
if consistence_check_source():
source_fluo['indexes_1'] = np.array([self.border_max_y - source_fluo['registration'][0],
self.border_min_y - source_fluo['registration'][0]])
source_fluo['indexes_2'] = np.array([self.border_max_x - source_fluo['registration'][1],
self.border_min_x - source_fluo['registration'][1]])
source_phase['indexes_1'] = np.array([self.border_max_y - source_phase['registration'][0],
self.border_min_y - source_phase['registration'][0]])
source_phase['indexes_2'] = np.array([self.border_max_x - source_phase['registration'][1],
self.border_min_x - source_phase['registration'][1]])
else:
return False
if consistence_check_bleach():
bleach_fluo['indexes_1'] = np.array([self.border_max_y - bleach_fluo['registration'][0],
self.border_min_y - bleach_fluo['registration'][0]])
bleach_fluo['indexes_2'] = np.array([self.border_max_x - bleach_fluo['registration'][1],
self.border_min_x - bleach_fluo['registration'][1]])
bleach_phase['indexes_1'] = np.array([self.border_max_y - bleach_phase['registration'][0],
self.border_min_y - bleach_phase['registration'][0]])
bleach_phase['indexes_2'] = np.array([self.border_max_x - bleach_phase['registration'][1],
self.border_min_x - bleach_phase['registration'][1]])
else:
bleach_phase = False
bleach_fluo = False
print('Image is processed WITHOUT BACKGROUND SUBTRACTION')
return {'source_fluo': source_fluo,
'source_phase': source_phase,
'bleach_fluo': bleach_fluo,
'bleach_phase': bleach_phase
}
def get_subtracted(self, index):
"""
Returns phase contrast, fluorescence image and metadata of antibody indexed by input variable.
Background image and light field artifacts are already corrected for fluorescence.
:param index: int
:return:
"""
temp = self.get_image_META(index)
if temp == False: return False
bleach_bool = True
if type(temp['bleach_fluo']) == type(False):
bleach_bool = False
fluo_name = temp['source_fluo']['fid']
phase_name = temp['source_phase']['fid']
if exists(join(self.PATH_META_IMG, fluo_name + '.tif')) and exists(join(self.PATH_META_IMG, phase_name + '.tif')):
phase = tiff.imread(join(self.PATH_META_IMG, phase_name + '.tif')).astype(np.float32)
fluo = tiff.imread(join(self.PATH_META_IMG, fluo_name + '.tif')).astype(np.float32)
else:
try:
calib = cv2.imread(self.calib_pd.loc[
(self.calib_pd['calib_type'] == 'b') &
(self.calib_pd['filter'] == temp['source_fluo']['filter']) &
(self.calib_pd['integration_time'] == temp['source_fluo']['integration_time'])
].iloc[0]['path'], 2)
except:
## Because one run has an 8000 ms integration time with no corresponding background file
calib = np.zeros(cv2.imread(temp['source_fluo']['path'], 2).astype(np.float32).shape)
print('Calibration file missing for the file with index ' + str(index + 2))
print('Image will be processed without background. Consider carefully whether to use this image for further analysis.')
source_fluo = cv2.imread(temp['source_fluo']['path'], 2).astype(np.float32)
source_phase = cv2.imread(temp['source_phase']['path'], 2).astype(np.float32)
if bleach_bool:
bleach_fluo =cv2.imread(temp['bleach_fluo']['path'], 2).astype(np.float32)
#bleach_phase = cv2.imread(temp['source_fluo']['path'], 2).astype(np.float32)
source_fluo = source_fluo - calib
if bleach_bool: bleach_fluo = bleach_fluo - calib
source_fluo = source_fluo[
temp['source_fluo']['indexes_1'][0]: source_fluo.shape[0] + temp['source_fluo']['indexes_1'][1],
temp['source_fluo']['indexes_2'][0]: source_fluo.shape[1] + temp['source_fluo']['indexes_2'][1]
]
if bleach_bool:
bleach_fluo = bleach_fluo[
temp['bleach_fluo']['indexes_1'][0]: bleach_fluo.shape[0] + temp['bleach_fluo']['indexes_1'][1],
temp['bleach_fluo']['indexes_2'][0]: bleach_fluo.shape[1] + temp['bleach_fluo']['indexes_2'][1]
]
source_phase = source_phase[
temp['source_phase']['indexes_1'][0]: source_phase.shape[0] + temp['source_phase']['indexes_1'][1],
temp['source_phase']['indexes_2'][0]: source_phase.shape[1] + temp['source_phase']['indexes_2'][1]
]
if bleach_bool:
fluo = source_fluo - bleach_fluo
else:
fluo = source_fluo
fluo = medfilt2d(fluo, 3)
phase = source_phase
fluo[fluo < 0] = 0
tiff.imsave(join(self.PATH_META_IMG, phase_name + '.tif'), phase.astype(np.uint16))
tiff.imsave(join(self.PATH_META_IMG, fluo_name + '.tif'), fluo.astype(np.uint16))
return phase, fluo, temp
def get_average_phase(self):
"""
Average phase image of registered phase contrast images from source (only fluorescence image corresponding phase
contrast images, not bleaching) folder of original raw data.
:return:
"""
print(self.PATH_META_IMG)
if exists(join(self.PATH_META_IMG, 'average_phase.tif')):
phase = tiff.imread(join(self.PATH_META_IMG, 'average_phase.tif')).astype(np.float32)
else:
temp_phase, fluo, temp = self.get_subtracted(0)
for k in range(1, self.__get_MELCDataset_len__()):
phase, fluo, temp = self.get_subtracted(k)
temp_phase = temp_phase + phase
phase = temp_phase / self.__get_MELCDataset_len__()
phase = phase.round()
phase[phase < 0] = 0
tiff.imsave(join(self.PATH_META_IMG, 'average_phase.tif'), phase.astype(np.uint16))
return phase
class MELCTiler(MELCDataset):
def __init__(self, path_data, path_annotations):
super(MELCTiler, self).__init__(path_data) # calls __init__ of MELCDataset to initialize all paths and run all the init. Everything is changed in this class then
#MELCDataset.__init__(self, path_data)
self.tile_size = 256
self.tile_overlap = 128
self.phase_average = self.get_average_phase()
self.im_shape = self.phase_average.shape
self.PATH_ANNOTATIONS = path_annotations
if not ('_'.join(self.PATH_ANNOTATIONS.split(SEPARATOR)[-1].split('_')[1:]) == '_'.join(self.PATH_DATA.split(SEPARATOR)[-1].split('_')[:2])):
error_str = 'Annotations ' '_'.join(self.PATH_ANNOTATIONS.split(SEPARATOR)[-1].split('_')[:2]) + ' do not match ' + \
'_'.join(self.PATH_DATA.split(SEPARATOR)[-1].split('_')[:2]) + ' data'
raise Exception(error_str)
self.annot_pd = self.__META_ANNOTATION_create()
def __META_ANNOTATION_create(self):
def get_FID(x):
return x['path'].split(SEPARATOR)[-1]
def get_X_idx(x):
temp = np.array(x['fid'].split('X')[-1].split('.')[0].split('_'))
temp_array = np.array([], dtype=temp.dtype)
for ele in temp:
if len(ele) > 0: temp_array = np.append(temp_array, ele)
return temp_array.astype(np.int16)
def get_Y_idx(x):
temp = np.array(x['fid'].split('X')[0].split('Y')[-1].split('_'))
temp_array = np.array([], dtype=temp.dtype)
for ele in temp:
if len(ele) > 0: temp_array = np.append(temp_array, ele)
return temp_array.astype(np.int16)
annotations_svg = myF.get_files(self.PATH_ANNOTATIONS, ('svg', 'SVG'))
annot_pd = pd.DataFrame(annotations_svg)
annot_pd = annot_pd.rename(columns={0: "path"})
annot_pd['fid'] = annot_pd.apply(lambda x: get_FID(x), axis=1)
annot_pd['Y_indexes'] = annot_pd.apply(lambda x: get_Y_idx(x), axis=1)
annot_pd['X_indexes'] = annot_pd.apply(lambda x: get_X_idx(x), axis=1)
return annot_pd
class MELCSynthesiser(MELCTiler):
def __init__(self, path_data, path_annotations):
self.annot_pd = []
#super(MELCTiler, self).__init__(path_data)
super(MELCSynthesiser, self).__init__(path_data, path_annotations)
#MELCTiler.__init__(self, path_data, path_annotations)
# take image of the background, initialise source files - annotations done by hand
# employ what has already been done.
self.background = self._get_background(self.phase_average)
fid = open(join(path_annotations, 'meta.json'), 'r')
self.annot_meta = json.loads(fid.read())
fid.close()
if self.annot_meta['original_file'].split('_')[0] == 'pb':
refImg_path = \
self.files_pd.loc[(self.files_pd['modality'] == 'phase') & (self.files_pd['type'] == 'bleach')][
'path'].reset_index(drop='True')[0]
else:
refImg_path = \
self.files_pd.loc[(self.files_pd['modality'] == 'phase') & (self.files_pd['type'] == 'source')][
'path'].reset_index(drop='True')[0]
## TODO IMPLEMENT POSSIBILITY FOR PHASE_AVERAGE DATA
refImg_path = refImg_path.split(SEPARATOR)
refImg_path[-1] = self.annot_meta['original_file']
refImg_path = SEPARATOR.join(refImg_path)
refImg_whole = cv2.imread(refImg_path, 2)
self.refImg_whole = refImg_whole
cell_list = []
cell_dict = {
'image': np.array([]),
'contour': np.array([])
}
frame = 25
for k in range(self.annot_pd.__len__()):
AnnotObj = SVGAnnot(self.annot_pd['path'][k])
annotations = AnnotObj.get_contours()
temp = self.annot_pd['path'][k].split(SEPARATOR)[-1].split('.')[0].split('_')
idx1_IMG = np.array([int(temp[3]), int(temp[4])])
idx2_IMG = np.array([int(temp[-2]), int(temp[-1])])
idx1_IMG = idx1_IMG + self.annot_meta['idx1']
idx2_IMG = idx2_IMG + self.annot_meta['idx2']
refImg = refImg_whole[idx1_IMG[0]:idx1_IMG[1], idx2_IMG[0]:idx2_IMG[1]]
for annot in annotations:
if len(annot) > 0:  # skip faulty annotations that consist of a single click
idx1 = np.array([annot[:, 0, 1].min() - frame, annot[:, 0, 1].max() + frame])
idx2 = np.array([annot[:, 0, 0].min() - frame, annot[:, 0, 0].max() + frame])
if idx1[0] > frame and idx2[0] > frame \
and idx1[1] < refImg.shape[1] - 1 - frame and idx2[1] < refImg.shape[1] - frame - 1:
idx1[idx1 < 0] = 0
idx2[idx2 < 0] = 0
idx1[idx1 >= refImg.shape[0]] = refImg.shape[0] - 1
idx2[idx2 >= refImg.shape[1]] = refImg.shape[1] - 1
annotImg = np.zeros(refImg.shape, dtype=np.uint8)
annotImg = cv2.drawContours(annotImg, [annot], -1, 255, -1)
temp_dict = deepcopy(cell_dict)
temp_dict['image'] = refImg[idx1[0]:idx1[1], idx2[0]:idx2[1]]
#temp_dict['mask'] = annotImg[idx1[0]:idx1[1], idx2[0]:idx2[1]]
temp_dict['contour'] = annot
temp_dict['contour'][:, 0, 0] = temp_dict['contour'][:, 0, 0] - idx2[0]
temp_dict['contour'][:, 0, 1] = temp_dict['contour'][:, 0, 1] - idx1[0]
#temp_dict['image'] = refImg
#temp_dict['mask'] = annotImg
cell_list.append(temp_dict)
self.cell_list = cell_list
self.CellAugmenter = CellObjectAugmenter()
## WORKS SO FAR TO HERE
@classmethod
def _extend(cls, x, extend):
temp = np.array([])
extend = int(extend)
temp = np.append(temp, x[0, :].flatten())
temp = np.append(temp, x[-1, :].flatten())
temp = np.append(temp, x[:, 0].flatten())
temp = np.append(temp, x[:, -1].flatten())
y = np.zeros((x.shape[0] + 2 * extend, x.shape[1] + 2 * extend))
y[extend:-extend, extend:-extend] = x
return y
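# Note (added): the border samples gathered in `temp` above are never used;
# as written, _extend is plain zero-padding, equivalent to the sketch
#
#   y = np.pad(x, int(extend), mode='constant')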
@classmethod
def _get_blurring_mask(cls, size, std):
winY = signal.gaussian(size[0], std)
tempY = np.zeros((size[0], 1))
tempY[:, 0] = winY[:]
tempY = np.repeat(tempY, size[1], axis=1)
winX = signal.gaussian(size[1], std)
tempX = np.zeros((1, size[1]))
tempX[0, :] = winX[:]
tempX = np.repeat(tempX, size[0], axis=0)
win = tempX * tempY
win = win / win.sum()
return win
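# Note (added sketch): the repeat-and-multiply construction above builds a
# separable 2D Gaussian window; an equivalent, more compact form uses an
# outer product:
#
#   win = np.outer(signal.gaussian(size[0], std), signal.gaussian(size[1], std))
#   win = win / win.sum()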
def _get_random_artificial_cell(self):
temp = self._get_random_cell()
img = temp['image']
contour = temp['contour']
img, contour = self.CellAugmenter.get_random_transform(img, [contour])
img, contour = self.CellAugmenter.get_random_resize(img, contour)
cntr2D = np.zeros(img.shape, dtype=np.uint8)
mask = cv2.drawContours(cntr2D, contour, -1, 255, -1)
halo_mask = cv2.dilate(mask, np.ones((3, 3), dtype=np.uint8),
iterations=2).astype(np.uint16)
halo_mask = halo_mask - mask
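# Synthesis idea (added comment): fill the cell body with the brightest
# halo value, keep the measured halo pixels, then alternate below between
# re-imposing these hard values and blurring with progressively smaller
# Gaussian windows, so the interior decays smoothly like a phase-contrast cell.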
halo_max_value = img[halo_mask == 255].max()
img_base_value = img[halo_mask + mask > 0].min()
img = img - img_base_value
synthImg = np.zeros(img.shape)
synthImg[mask == 255] = halo_max_value
synthImg[halo_mask == 255] = img[halo_mask == 255]
win = self._get_blurring_mask((11, 11), 100)
synthImg = signal.convolve2d(self._extend(synthImg, 5), win, 'valid')
#figure()
#imshow(synthImg, cmap='gray')
#figure(999)
#plot(synthImg[:, 35])
win = self._get_blurring_mask((7, 7), 10)
for k in range(2):
synthImg[mask == 255] = halo_max_value
synthImg[halo_mask == 255] = img[halo_mask == 255]
#figure()
#imshow(synthImg, cmap='gray')
#figure(9999)
#plot(synthImg[:, 35])
synthImg = signal.convolve2d(self._extend(synthImg, 3), win, 'valid')
#figure()
#imshow(synthImg, cmap='gray')
#figure(9999)
#plot(synthImg[:, 35])
win = self._get_blurring_mask((3, 3), 3)
for k in range(2):
synthImg[mask == 255] = halo_max_value
synthImg[halo_mask == 255] = img[halo_mask == 255]
#figure()
#imshow(synthImg, cmap='gray')
#figure(99999)
#plot(synthImg[:, 35])
synthImg = signal.convolve2d(self._extend(synthImg, 1), win, 'valid')
#figure()
#imshow(synthImg, cmap='gray')
#figure(99999)
#plot(synthImg[:, 35])
synthImg[mask+halo_mask > 0] = img[mask+halo_mask > 0]
win = self._get_blurring_mask((3, 3), 0.8)
synthImg = signal.convolve2d(self._extend(synthImg, 1), win, 'valid')
# synthImg = synthImg + img_base_value
return synthImg, contour
def _get_random_cell(self):
return self.cell_list[randint(len(self.cell_list) - 1)]
def _get_random_background(self, size_tuple):
s1 = size_tuple[0]
s2 = size_tuple[1]
beg0 = np.random.randint(100, self.background.shape[0] - 100 - s1)
beg1 = np.random.randint(100, self.background.shape[1] - 100 - s2)
return self.background[beg0:beg0 + s1, beg1:beg1 + s2]
@classmethod
def _get_background(cls, phase_contrast):
# suppression
background = deepcopy(phase_contrast).astype(np.float32)
threshold_value = 30.
background_value = np.median(background) * 0.96
#figure()
#plot(self.phase_average[:, 1000])
#plot(np.ones(2000) + background_value)
#figure()
#imshow(self.background, cmap='gray')
for k in range(5):
temp_img = background.copy() - background_value
temp1 = np.zeros(background.shape)
temp2 = np.zeros(background.shape)
temp1[temp_img > threshold_value] = 1
temp2[temp_img < -2*threshold_value] = 1
background[temp1 == 1] = background[temp1 == 1] - background_value
background[temp1 == 1] = background[temp1 == 1] / 2 + background_value
background[temp2 == 1] = background[temp2 == 1] - background_value
background[temp2 == 1] = 2*background[temp2 == 1] / 3 + background_value
return background
#figure()
#plot(self.phase_average[:, 1000])
#plot(self.background[:, 1000])
#plot(np.zeros(2000) + background_value)
#plot(np.zeros(2000) + threshold_value + background_value)
#plot(np.zeros(2000) - threshold_value + background_value)
#figure()
#temp = self.background.copy()
#temp[-1, -1] = self.phase_average.max()
#temp[0, 0] = self.phase_average.min()
#imshow(temp, cmap='gray')
def generate_synthetic_image(self, size_tuple):
step_px = 40
border_px = 25 # 30
max_shift_px = 20
probability_of_cell_placement = np.random.rand() * 0.2 + 0.7
probability_of_cell_placement = 1
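# NOTE (added): this overrides the randomized probability above, so a cell
# is currently placed at every grid node.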
background_multiply_ratio = np.random.rand()*0.2+0.9
annotations = list()
Y = np.zeros(size_tuple)
Y_background = self._get_random_background(size_tuple) * background_multiply_ratio
Y_nobackground = np.zeros(Y.shape)
Y_masks = np.zeros(Y.shape)
grid1 = np.arange(max_shift_px + border_px, Y.shape[0] - border_px - max_shift_px, step_px).round().astype(np.int32)
grid2 = np.arange(max_shift_px + border_px, Y.shape[1] - border_px - max_shift_px, step_px).round().astype(np.int32)
cntr = 0
for k1 in grid1:
for k2 in grid2:
luck = np.random.rand()
if luck < probability_of_cell_placement:
cntr += 1
global_position_center_1 = k1
global_position_center_2 = k2
shift_1 = np.random.randint(-max_shift_px, +max_shift_px, 1)[0]
shift_2 = np.random.randint(-max_shift_px, +max_shift_px, 1)[0]
global_position_center_1 += shift_1
global_position_center_2 += shift_2
cell_image, annotation_contour = self._get_random_artificial_cell()
if cell_image.shape[0] % 2 == 0:
temp_cell_image = np.zeros((cell_image.shape[0] + 1, cell_image.shape[1]))
temp_cell_image[:-1, :] = cell_image
cell_image = temp_cell_image
if cell_image.shape[1] % 2 == 0:
temp_cell_image = np.zeros((cell_image.shape[0], cell_image.shape[1] + 1))
temp_cell_image[:, :-1] = cell_image
cell_image = temp_cell_image
cell_center = np.array(
[
np.floor((annotation_contour[0][:, 0, 1].max() + annotation_contour[0][:, 0, 1].min()) / 2),
np.floor((annotation_contour[0][:, 0, 0].max() + annotation_contour[0][:, 0, 0].min()) / 2)
]
)
annotation_mask = np.zeros(cell_image.shape, dtype=np.uint8)
annotation_mask = cv2.drawContours(annotation_mask, annotation_contour, -1, 255, -1)
contour_to_img = deepcopy(annotation_contour[0])
contour_to_img[:, 0, 0] = contour_to_img[:, 0, 0] - cell_center[1] + global_position_center_2
contour_to_img[:, 0, 1] = contour_to_img[:, 0, 1] - cell_center[0] + global_position_center_1
temp_big_mask = np.zeros(Y.shape)
temp_big_mask = cv2.drawContours(temp_big_mask, [contour_to_img], -1, 255, -1)
temp_big_img = np.zeros(Y.shape)
'''
desisim.quickcat
================
Code for quickly generating an output zcatalog given fiber assignment tiles,
a truth catalog, and optionally a previous zcatalog.
'''
from __future__ import absolute_import, division, print_function
import os
import yaml
from collections import Counter
from pkg_resources import resource_filename
from time import asctime
import numpy as np
from astropy.io import fits
from astropy.table import Table, Column, vstack
import sys
import scipy.special as sp
import desisim
from desisim.targets import get_simtype
import astropy.constants
c = astropy.constants.c.to('km/s').value
from desitarget.targetmask import desi_mask, bgs_mask, mws_mask
from desiutil.log import get_logger
log = get_logger()
#- redshift errors, zwarn, cata fail rate fractions from
#- /project/projectdirs/desi/datachallenge/redwood/spectro/redux/redwood/
#- sigmav = c sigmaz / (1+z)
_sigma_v = {
# 'ELG': 38.03,
# 'LRG': 67.38,
'BGS': 37.70,
# 'QSO': 182.16,
'STAR': 51.51,
'WD':54.35,
'SKY': 9999, #- meaningless
'UNKNOWN': 9999, #- meaningless
}
_zwarn_fraction = {
# 'ELG': 0.087,
# 'LRG': 0.007,
# 'QSO': 0.020,
'BGS': 0.024,
'STAR': 0.345,
'WD':0.094,
'SKY': 1.0,
'UNKNOWN': 1.0,
}
_cata_fail_fraction = {
# 'ELG': 0.020,
# 'LRG': 0.002,
# 'QSO': 0.012,
'BGS': 0.003,
'STAR': 0.050,
'WD':0.0,
'SKY': 0.,
'UNKNOWN': 0.,
}
def get_zeff_obs(simtype, obsconditions):
'''
'''
if(simtype=='LRG'):
p_v = [1.0, 0.15, -0.5]
p_w = [1.0, 0.4, 0.0]
p_x = [1.0, 0.06, 0.05]
p_y = [1.0, 0.0, 0.08]
p_z = [1.0, 0.0, 0.0]
sigma_r = 0.02
elif(simtype=='QSO'):
p_v = [1.0, -0.2, 0.3]
p_w = [1.0, -0.5, 0.6]
p_x = [1.0, -0.1, -0.075]
p_y = [1.0, -0.08, -0.04]
p_z = [1.0, 0.0, 0.0]
sigma_r = 0.05
elif(simtype=='ELG'):
p_v = [1.0, -0.1, -0.2]
p_w = [1.0, 0.25, -0.75]
p_x = [1.0, 0.0, 0.05]
p_y = [1.0, 0.2, 0.1]
p_z = [1.0, -10.0, 300.0]
sigma_r = 0.075
else:
log.warning('No model for how observing conditions impact {} redshift efficiency'.format(simtype))
return np.ones(len(obsconditions))
ncond = len(np.atleast_1d(obsconditions['AIRMASS']))
# airmass
v = obsconditions['AIRMASS'] - np.mean(obsconditions['AIRMASS'])
pv = p_v[0] + p_v[1] * v + p_v[2] * (v**2. - np.mean(v**2))
# ebmv
if 'EBMV' in obsconditions :
w = obsconditions['EBMV'] - np.mean(obsconditions['EBMV'])
pw = p_w[0] + p_w[1] * w + p_w[2] * (w**2 - np.mean(w**2))
else :
pw = np.ones(ncond)
# seeing
x = obsconditions['SEEING'] - np.mean(obsconditions['SEEING'])
px = p_x[0] + p_x[1]*x + p_x[2] * (x**2 - np.mean(x**2))
# transparency
if 'LINTRANS' in obsconditions :
y = obsconditions['LINTRANS'] - np.mean(obsconditions['LINTRANS'])
py = p_y[0] + p_y[1]*y + p_y[2] * (y**2 - np.mean(y**2))
else :
py = np.ones(ncond)
# moon illumination fraction
z = obsconditions['MOONFRAC'] - np.mean(obsconditions['MOONFRAC'])
pz = p_z[0] + p_z[1]*z + p_z[2] * (z**2 - np.mean(z**2))
#- if moon is down phase doesn't matter
#- NOTE: the next line resets pz to 1 for *all* conditions, not only when
#- the moon is down, so the moon-phase polynomial above is effectively disabled
pz = np.ones(ncond)
pz[obsconditions['MOONALT'] < 0] = 1.0
pr = 1.0 + np.random.normal(size=ncond, scale=sigma_r)
#- this correction factor can be greater than 1, but not less than 0
pobs = (pv * pw * px * py * pz * pr).clip(min=0.0)
return pobs
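# Example (added sketch; the numbers below are made up and the Table import
# is an assumption about the caller's data structure):
#
#   from astropy.table import Table
#   obs = Table(dict(AIRMASS=[1.0, 1.3], EBMV=[0.02, 0.05],
#                    SEEING=[1.0, 1.2], LINTRANS=[0.95, 0.85],
#                    MOONFRAC=[0.1, 0.6], MOONALT=[-10.0, 30.0]))
#   pobs = get_zeff_obs('ELG', obs)   # one efficiency factor per tile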
def get_redshift_efficiency(simtype, targets, truth, targets_in_tile, obsconditions, params, ignore_obscondition=False):
"""
Simple model to get the redshift efficiency from the observational conditions or observed magnitudes+redshift
Args:
simtype: ELG, LRG, QSO, MWS, BGS
targets: target catalog table; currently used only for TARGETID
truth: truth table with OIIFLUX, TRUEZ
targets_in_tile: dictionary. Keys correspond to tileids, its values are the
arrays of targetids observed in that tile.
obsconditions: table observing conditions with columns
'TILEID': array of tile IDs
'AIRMASS': array of airmass values on a tile
'EBMV': array of E(B-V) values on a tile
'LINTRANS': array of atmospheric transparency during spectro obs; floats [0-1]
'MOONFRAC': array of moonfraction values on a tile.
'SEEING': array of FWHM seeing during spectroscopic observation on a tile.
parameter_filename: yaml file with quickcat parameters
ignore_obscondition: if True, no variation of efficiency with obs. conditions (adjustment of exposure time should correct for mean change of S/N)
Returns:
tuple of arrays (observed, p) both with same length as targets
observed: boolean array of whether the target was observed in these tiles
p: probability to get this redshift right
"""
targetid = targets['TARGETID']
n = len(targetid)
try:
if 'DECAM_FLUX' in targets.dtype.names :
true_gflux = targets['DECAM_FLUX'][:, 1]
true_rflux = targets['DECAM_FLUX'][:, 2]
else:
true_gflux = targets['FLUX_G']
true_rflux = targets['FLUX_R']
except (KeyError, ValueError):
raise Exception('Missing photometry needed to estimate redshift efficiency!')
a_small_flux = 1e-40
true_gflux[true_gflux < a_small_flux] = a_small_flux
true_rflux[true_rflux < a_small_flux] = a_small_flux
if (obsconditions is None) or ('OIIFLUX' not in truth.dtype.names):
raise Exception('Missing obsconditions and flux information to estimate redshift efficiency')
if (simtype == 'ELG'):
# Read the model OII flux threshold (FDR fig 7.12 modified to fit redmonster efficiency on OAK)
# filename = resource_filename('desisim', 'data/quickcat_elg_oii_flux_threshold.txt')
# Read the model OII flux threshold (FDR fig 7.12)
filename = resource_filename('desisim', 'data/elg_oii_flux_threshold_fdr.txt')
fdr_z, modified_fdr_oii_flux_threshold = np.loadtxt(filename, unpack=True)
# Compute OII flux thresholds for truez
oii_flux_limit = np.interp(truth['TRUEZ'],fdr_z,modified_fdr_oii_flux_threshold)
oii_flux_limit[oii_flux_limit<1e-20]=1e-20
# efficiency is modeled as a function of flux_OII/f_OII_threshold(z) and an arbitrary sigma_fudge
snr_in_lines = params["ELG"]["EFFICIENCY"]["SNR_LINES_SCALE"]*7*truth['OIIFLUX']/oii_flux_limit
snr_in_continuum = params["ELG"]["EFFICIENCY"]["SNR_CONTINUUM_SCALE"]*true_rflux
snr_tot = np.sqrt(snr_in_lines**2 + snr_in_continuum**2)
import datetime
import os
import tempfile
import numpy as np
import scipy.stats as st
import xarray as xr
from wildfire.data import goes_level_1
def test_goes_band(goes_level_1_mesoscale):
actual = goes_level_1.GoesBand(dataset=goes_level_1_mesoscale)
assert actual.region == "M1"
assert actual.satellite == "G16"
np.testing.assert_almost_equal(actual.band_wavelength_micrometers, 0.47, decimal=2)
assert actual.scan_time_utc == datetime.datetime(2020, 1, 1, 0, 1, 26, 200000)
assert actual.band_id == 1
assert actual.parse().equals(actual.reflectance_factor)
assert np.isnan(actual.reflectance_factor.data).sum() == 0
assert (
np.isnan(actual.brightness_temperature.data).sum() == actual.dataset.Rad.data.size
)
np.testing.assert_array_equal(
actual.normalize(), st.zscore(actual.reflectance_factor, axis=None)
)
np.testing.assert_array_equal(
actual.normalize(use_radiance=True), st.zscore(actual.dataset.Rad, axis=None),
)
with tempfile.TemporaryDirectory() as temp_directory:
filepath = actual.to_netcdf(directory=temp_directory)
assert os.path.exists(filepath)
assert isinstance(xr.open_dataset(filepath), xr.core.dataset.Dataset)
def test_reflective_band(goes_level_1_mesoscale):
actual = goes_level_1.GoesBand(dataset=goes_level_1_mesoscale)
assert actual.parse().equals(actual.reflectance_factor)
assert np.isnan(actual.reflectance_factor.data).sum() == 0
np.testing.assert_array_equal(
actual.normalize(), st.zscore(actual.reflectance_factor, axis=None)
)
def test_emissive_band(goes_level_1_channel_7):
actual = goes_level_1.GoesBand(dataset=goes_level_1_channel_7)
assert actual.parse().equals(actual.brightness_temperature)
assert np.isnan(actual.brightness_temperature.data).sum() == 0
import time
from numpy.linalg import norm
import numpy as np
from multiprocessing import Process, Manager, Lock, Pool
from ctypes import c_int
from itertools import product
import pandas as pd
Ntotlock = Lock()
class Scan:
def __init__(self, likelihood, par_min, par_max, N_iters):
self.likelihood = likelihood
if isinstance(par_min, float) and isinstance(par_max, float):
self.Npars = 1
self.par_min = [par_min,]
self.par_max = [par_max,]
elif len(par_min) == len(par_max):
self.Npars = len(par_min)
self.par_min = par_min
self.par_max = par_max
else:
raise Exception("The length of the limits of the parameter doesn't match!")
self.N_iters = N_iters
self.points = []
self.lh_list = []
self.Ntot = 0
self.mp = False
def run(self, *args):
raise NotImplementedError("You have to define the scan")
def run_time(self, *args):
self.start = time.time()
self.run(*args)
self.end = time.time()
print("Running time: " + str(self.end-self.start) + 's')
def increasecounter(self, Ntot):
if self.mp:
with Ntotlock:
self.Ntot.value += Ntot
else:
self.Ntot += Ntot
def run_mp(self, cores, *args):
self.mp = True
with Manager() as manager:
self.points = manager.list(self.points)
self.lh_list = manager.list(self.lh_list)
self.Ntot = manager.Value(c_int, self.Ntot)
processes = []
for i in range(0, cores):
p = Process(target = self.run, args = args)
p.start()
np.random.seed(int(p.pid + time.time())) #We have to reseed each process, or else they will produce the same random numbers
processes.append(p)
for p in processes:
p.join()
self.points = list(self.points)
self.lh_list = list(self.lh_list)
self.Ntot = int(self.Ntot.value)
self.mp = False
def run_mp_time(self, cores):
self.start = time.time()
self.run_mp(cores)
self.end = time.time()
print("Running time: " + str(self.end-self.start) + 's')
def clear(self):
self.points = []
self.lh_list = []
self.Ntot = 0
def get_points(self):
return self.points
def get_lh_list(self, index=None):
if index is None:
return self.lh_list
else:
s = []
for i in range(0, len(self.lh_list)):
s.append(self.lh_list[i][index])
return s
def get_point_series(self, coord):
s = []
for i in range(0, len(self.points)):
s.append(self.points[i][coord])
return s
def interpolate(self, point):
num = 0
den = 0
for i in range(0, len(self.points)):
d = norm(np.array(point) - np.array(self.points[i]))
if d == 0:
return self.lh_list[i]
else:
num += self.lh_list[i]/d**4
den += 1/d**4
return num/den
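# Example (added sketch): `interpolate` is inverse-distance-weighted
# interpolation with power 4 over the sampled points, so after a run the
# likelihood can be queried off-grid; `scan` below is a hypothetical
# already-run Scan instance:
#
#   lh_here = scan.interpolate([0.5, 1.2])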
def write(self, fout, mode='wt'):
with open(fout, mode) as f:
for p, l in zip(self.points, self.lh_list):
for i in range(0, self.Npars):
f.write(str(p[i])+'\t')
f.write(str(l)+'\n')
def save_csv(self, fout):
df_points = pd.DataFrame(self.points, columns=['x','y'])
df_lh = pd.DataFrame(self.lh_list)
df_total = pd.concat([df_points, df_lh], axis=1, join='outer')
df_total.to_csv(fout, sep='\t', index=False)
def inthebox(self, point):
for p in range(0, self.Npars):
if point[p] < self.par_min[p]:
return False
if point[p] > self.par_max[p]:
return False
return True
def acceptance(self):
return len(self.points)/self.Ntot
def bestpoint(self):
return self.points[np.argmax(self.lh_list)]
def expectedvalue(self, func, *args):
lhmax = np.max(self.lh_list)
from __future__ import division
from __future__ import print_function
import panoramasdk
import cv2
import numpy as np
import boto3
# Global Variables
HEIGHT = 512
WIDTH = 512
MODEL = 'pikachu'
class PikachuDetection(panoramasdk.base):
def interface(self):
return {
"parameters":
(
("float", "threshold", "Detection threshold", 0.50),
("model", "pokemon_detection", "Model for detecting pokemon", MODEL),
("int", "batch_size", "Model batch size", 1),
("float", "pokemon_index", "pokemon index based on dataset used", 0),
),
"inputs":
(
("media[]", "video_in", "Camera input stream"),
),
"outputs":
(
("media[video_in]", "video_out", "Camera output stream"),
)
}
def init(self, parameters, inputs, outputs):
try:
# Frame Number Initialization
self.frame_num = 0
# Index for pokemon from parameters
self.index = parameters.pokemon_index
# Set threshold for model from parameters
self.threshold = parameters.threshold
# set number of pokemon
self.number_pokemon = 0
# Load model from the specified directory.
print("loading the model...")
self.model = panoramasdk.model()
self.model.open(parameters.pokemon_detection, 1)
print("model loaded")
# Create input and output arrays.
class_info = self.model.get_output(0)
prob_info = self.model.get_output(1)
rect_info = self.model.get_output(2)
self.class_array = np.empty(class_info.get_dims(), dtype=class_info.get_type())
self.prob_array = np.empty(prob_info.get_dims(), dtype=prob_info.get_type())
self.rect_array = np.empty(rect_info.get_dims(), dtype=rect_info.get_type())
return True
except Exception as e:
print("Exception: {}".format(e))
return False
def preprocess(self, img):
resized = cv2.resize(img, (HEIGHT, WIDTH))
mean = [0.485, 0.456, 0.406] # RGB
std = [0.229, 0.224, 0.225] # RGB
img = resized.astype(np.float32) / 255. # converting array of ints to floats
img_a = img[:, :, 0]
img_b = img[:, :, 1]
img_c = img[:, :, 2]
# Extracting single channels from 3 channel image
# The above code could also be replaced with cv2.split(img) << which will return 3 numpy arrays (using opencv)
# normalizing per channel data:
img_a = (img_a - mean[0]) / std[0]
img_b = (img_b - mean[1]) / std[1]
img_c = (img_c - mean[2]) / std[2]
# putting the 3 channels back together:
x1 = [[[], [], []]]
x1[0][0] = img_a
x1[0][1] = img_b
x1[0][2] = img_c
# x1 = mx.nd.array(np.asarray(x1))
x1 = np.asarray(x1)
return x1
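# Note (added sketch): assuming `resized` is an HxWx3 RGB image, the
# per-channel steps above collapse to one vectorised expression:
#
#   img = (resized.astype(np.float32) / 255. - mean) / std
#   x1 = img.transpose(2, 0, 1)[np.newaxis]   # NCHW batch of one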
def get_number_pokemon(self, class_data, prob_data):
# get indices of pokemon detections in class data
pokemon_indices = [i for i in range(len(class_data)) if int(class_data[i]) == self.index]
# use these indices to keep only detections at or above the configured threshold in prob_data
prob_pokemon_indices = [i for i in pokemon_indices if prob_data[i] >= self.threshold]
return prob_pokemon_indices
def entry(self, inputs, outputs):
self.frame_num += 1
for i in range(len(inputs.video_in)):
stream = inputs.video_in[i]
pokemon_image = stream.image
stream.add_label('Number of Pikachu : {}'.format(self.number_pokemon), 0.6, 0.05)
x1 = self.preprocess(pokemon_image)
# Do inference on the new frame.
self.model.batch(0, x1)
self.model.flush()
# Get the results.
resultBatchSet = self.model.get_result()
class_batch = resultBatchSet.get(0)
prob_batch = resultBatchSet.get(1)
rect_batch = resultBatchSet.get(2)
class_batch.get(0, self.class_array)
prob_batch.get(0, self.prob_array)
rect_batch.get(0, self.rect_array)
class_data = self.class_array[0]
prob_data = self.prob_array[0]
rect_data = self.rect_array[0]
# Get Indices of classes that correspond to Pokemon
pokemon_indices = self.get_number_pokemon(class_data, prob_data)
print('pokemon indices is {}'.format(pokemon_indices))
try:
self.number_pokemon = len(pokemon_indices)
except:
self.number_pokemon = 0
# Draw Bounding Boxes on HDMI Output
if self.number_pokemon > 0:
for index in pokemon_indices:
left = np.clip(rect_data[index][0] / np.float(HEIGHT), 0, 1)
top = np.clip(rect_data[index][1] / np.float(HEIGHT), 0, 1)
import copy as python_copy
import itertools
import uuid
from pprint import pprint
from typing import Any, Dict, List, Optional, Tuple, Union
import networkx as nx
import numpy as np
from numpy import cos, float64, int64, mod, ndarray, pi, sin
from omegaconf import OmegaConf
from omegaconf.listconfig import ListConfig
from phidl.device_layout import Device, DeviceReference, Label, _parse_layer
from pp.compare_cells import hash_cells
from pp.config import conf
from pp.get_netlist import get_netlist
from pp.port import Port, select_ports
def copy(D):
"""returns a copy of a Component."""
D_copy = Component(name=D._internal_name)
D_copy.info = python_copy.deepcopy(D.info)
for ref in D.references:
new_ref = ComponentReference(
ref.parent,
origin=ref.origin,
rotation=ref.rotation,
magnification=ref.magnification,
x_reflection=ref.x_reflection,
)
new_ref.owner = D_copy
D_copy.add(new_ref)
for alias_name, alias_ref in D.aliases.items():
if alias_ref == ref:
D_copy.aliases[alias_name] = new_ref
for port in D.ports.values():
D_copy.add_port(port=port)
for poly in D.polygons:
D_copy.add_polygon(poly)
for label in D.labels:
D_copy.add_label(
text=label.text,
position=label.position,
layer=(label.layer, label.texttype),
)
return D_copy
class SizeInfo:
def __init__(self, bbox: ndarray) -> None:
self.west = bbox[0, 0]
self.east = bbox[1, 0]
self.south = bbox[0, 1]
self.north = bbox[1, 1]
self.width = self.east - self.west
self.height = self.north - self.south
xc = 0.5 * (self.east + self.west)
yc = 0.5 * (self.north + self.south)
self.sw = np.array([self.west, self.south])
self.se = np.array([self.east, self.south])
self.nw = np.array([self.west, self.north])
self.ne = np.array([self.east, self.north])
self.cw = np.array([self.west, yc])
self.ce = np.array([self.east, yc])
self.nc = np.array([xc, self.north])
from __future__ import division
import numpy as np
import scipy
import scipy.stats
import scipy.fftpack
import scipy.optimize
import stingray.lightcurve as lightcurve
import stingray.utils as utils
from stingray.exceptions import StingrayError
from stingray.gti import cross_two_gtis, bin_intervals_from_gtis, check_gtis
__all__ = ["Crossspectrum", "AveragedCrossspectrum", "coherence"]
def coherence(lc1, lc2):
"""
Estimate coherence function of two light curves.
Parameters
----------
lc1: lightcurve.Lightcurve object
The first light curve data for the channel of interest.
lc2: lightcurve.Lightcurve object
The light curve data for reference band
Returns
-------
coh : np.ndarray
Coherence function
"""
if not isinstance(lc1, lightcurve.Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, lightcurve.Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
cs = Crossspectrum(lc1, lc2, norm='none')
return cs.coherence()
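# Example (added, illustrative only; synthetic Poisson counts):
#
#   times = np.arange(0, 8, 1 / 64)
#   lc_a = lightcurve.Lightcurve(times, np.random.poisson(100, times.size))
#   lc_b = lightcurve.Lightcurve(times, np.random.poisson(100, times.size))
#   coh = coherence(lc_a, lc_b)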
class Crossspectrum(object):
def __init__(self, lc1=None, lc2=None, norm='none', gti=None):
"""
Make a cross spectrum from a (binned) light curve.
You can also make an empty Crossspectrum object to populate with your
own fourier-transformed data (this can sometimes be useful when making
binned periodograms).
Parameters
----------
lc1: lightcurve.Lightcurve object, optional, default None
The first light curve data for the channel/band of interest.
lc2: lightcurve.Lightcurve object, optional, default None
The light curve data for the reference band.
norm: {'frac', 'abs', 'leahy', 'none'}, default 'none'
The normalization of the (real part of the) cross spectrum.
Other Parameters
----------------
gti: 2-d float array
[[gti0_0, gti0_1], [gti1_0, gti1_1], ...] -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra (complex numbers)
df: float
The frequency resolution
m: int
The number of averaged cross-spectra amplitudes in each bin.
n: int
The number of data points/time bins in one segment of the light
curves.
nphots1: float
The total number of photons in light curve 1
nphots2: float
The total number of photons in light curve 2
"""
if isinstance(norm, str) is False:
raise TypeError("norm must be a string")
if norm.lower() not in ["frac", "abs", "leahy", "none"]:
raise ValueError("norm must be 'frac', 'abs', 'leahy', or 'none'!")
self.norm = norm.lower()
# check if input data is a Lightcurve object, if not make one or
# make an empty Crossspectrum object if lc1 == None or lc2 == None
if lc1 is None or lc2 is None:
if lc1 is not None or lc2 is not None:
raise TypeError("You can't do a cross spectrum with just one "
"light curve!")
else:
self.freq = None
self.power = None
self.df = None
self.nphots1 = None
self.nphots2 = None
self.m = 1
self.n = None
return
self.gti = gti
self.lc1 = lc1
self.lc2 = lc2
self._make_crossspectrum(lc1, lc2)
def _make_crossspectrum(self, lc1, lc2):
# make sure the inputs work!
if not isinstance(lc1, lightcurve.Lightcurve):
raise TypeError("lc1 must be a lightcurve.Lightcurve object")
if not isinstance(lc2, lightcurve.Lightcurve):
raise TypeError("lc2 must be a lightcurve.Lightcurve object")
# Then check that GTIs make sense
if self.gti is None:
self.gti = cross_two_gtis(lc1.gti, lc2.gti)
check_gtis(self.gti)
if self.gti.shape[0] != 1:
raise TypeError("Non-averaged Cross Spectra need "
"a single Good Time Interval")
lc1 = lc1.split_by_gti()[0]
lc2 = lc2.split_by_gti()[0]
# total number of photons is the sum of the
# counts in the light curve
self.nphots1 = np.float64(np.sum(lc1.counts))
self.nphots2 = np.float64(np.sum(lc2.counts))
self.meancounts1 = np.mean(lc1.counts)
self.meancounts2 = np.mean(lc2.counts)
# the number of data points in the light curve
if lc1.n != lc2.n:
raise StingrayError("Light curves do not have same number "
"of time bins per segment.")
if lc1.dt != lc2.dt:
raise StingrayError("Light curves do not have "
"same time binning dt.")
self.n = lc1.n
# the frequency resolution
self.df = 1.0/lc1.tseg
# the number of averaged periodograms in the final output
# This should *always* be 1 here
self.m = 1
# make the actual Fourier transform and compute cross spectrum
self.freq, self.unnorm_power = self._fourier_cross(lc1, lc2)
# If co-spectrum is desired, normalize here. Otherwise, get raw back
# with the imaginary part still intact.
self.power = self._normalize_crossspectrum(self.unnorm_power, lc1.tseg)
def _fourier_cross(self, lc1, lc2):
"""
Fourier transform the two light curves, then compute the cross spectrum.
Computed as CS = lc1 x lc2* (where lc2 is the one that gets
complex-conjugated)
Parameters
----------
lc1: lightcurve.Lightcurve object
One light curve to be Fourier transformed. Ths is the band of
interest or channel of interest.
lc2: lightcurve.Lightcurve object
Another light curve to be Fourier transformed.
This is the reference band.
Returns
-------
freq: numpy.ndarray
The positive Fourier frequencies
cross: numpy.ndarray
The (unnormalized) cross spectrum at the positive frequencies
"""
fourier_1 = scipy.fftpack.fft(lc1.counts) # do Fourier transform 1
fourier_2 = scipy.fftpack.fft(lc2.counts) # do Fourier transform 2
freqs = scipy.fftpack.fftfreq(lc1.n, lc1.dt)
cross = fourier_1[freqs > 0] * np.conj(fourier_2[freqs > 0])
return freqs[freqs > 0], cross
def rebin(self, df, method="mean"):
"""
Rebin the cross spectrum to a new frequency resolution df.
Parameters
----------
df: float
The new frequency resolution
Returns
-------
bin_cs = Crossspectrum object
The newly binned cross spectrum
"""
# rebin cross spectrum to new resolution
binfreq, bincs, step_size = utils.rebin_data(self.freq,
self.power, df,
method=method)
# make an empty cross spectrum object
# note: syntax deliberate to work with subclass Powerspectrum
bin_cs = self.__class__()
# store the binned periodogram in the new object
bin_cs.freq = binfreq
bin_cs.power = bincs
bin_cs.df = df
bin_cs.n = self.n
bin_cs.norm = self.norm
bin_cs.nphots1 = self.nphots1
bin_cs.nphots2 = self.nphots2
bin_cs.m = int(step_size)*self.m
return bin_cs
def _normalize_crossspectrum(self, unnorm_power, tseg):
"""
Normalize the real part of the cross spectrum to Leahy, absolute rms^2,
fractional rms^2 normalization, or not at all.
Parameters
----------
unnorm_power: numpy.ndarray
The unnormalized cross spectrum.
tseg: int
The length of the Fourier segment, in seconds.
Returns
-------
power: numpy.nd.array
The normalized co-spectrum (real part of the cross spectrum). For
'none' normalization, imaginary part is returned as well.
"""
# The "effective" counst/bin is the geometrical mean of the counts/bin
# of the two light curves
log_nphots1 = np.log(self.nphots1)
log_nphots2 = np.log(self.nphots2)
actual_nphots = np.float64(np.sqrt(np.exp(log_nphots1 + log_nphots2)))
actual_mean = np.sqrt(self.meancounts1 * self.meancounts2)
assert actual_mean > 0.0, \
"Mean count rate is <= 0. Something went wrong."
if self.norm.lower() == 'leahy':
c = unnorm_power.real
power = c * 2. / actual_nphots
elif self.norm.lower() == 'frac':
c = unnorm_power.real / np.float(self.n**2.)
power = c * 2. * tseg / (actual_mean**2.0)
elif self.norm.lower() == 'abs':
c = unnorm_power.real / np.float(self.n**2.)
power = c * (2. * tseg)
elif self.norm.lower() == 'none':
power = unnorm_power
else:
raise Exception("Normalization not recognized!")
return power
def rebin_log(self, f=0.01):
"""
Logarithmic rebin of the periodogram.
The new frequency depends on the previous frequency
modified by a factor f:
dnu_j = dnu_{j-1}*(1+f)
Parameters
----------
f: float, optional, default 0.01
parameter that steers the frequency resolution
Returns
-------
binfreq: numpy.ndarray
the binned frequencies
binpower: numpy.ndarray
the binned powers
nsamples: numpy.ndarray
the samples of the original periodogram included in each
frequency bin
"""
minfreq = self.freq[1] * 0.5 # frequency to start from
maxfreq = self.freq[-1] # maximum frequency to end
binfreq = [minfreq, minfreq + self.df] # first
df = self.freq[1] # the frequency resolution of the first bin
# until we reach the maximum frequency, increase the width of each
# frequency bin by f
while binfreq[-1] <= maxfreq:
binfreq.append(binfreq[-1] + df*(1.0+f))
df = binfreq[-1] - binfreq[-2]
# compute the mean of the powers that fall into each new frequency bin.
# we cast to np.double due to scipy's bad handling of longdoubles
binpower, bin_edges, binno = scipy.stats.binned_statistic(
self.freq.astype(np.double), self.power.astype(np.double),
statistic="mean", bins=binfreq)
# compute the number of powers in each frequency bin
nsamples = np.array([len(binno[np.where(binno == i)[0]])
for i in range(np.max(binno))])
# the frequency resolution
df = np.diff(binfreq)
# shift the lower bin edges to the middle of the bin and drop the
# last right bin edge
binfreq = binfreq[:-1] + df/2
return binfreq, binpower, nsamples
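# Added note: with bin widths growing as df_j = df_{j-1} * (1 + f), the
# j-th width is df_j = df_0 * (1 + f)**j, i.e. the grid is approximately
# uniform in log-frequency; f = 0.01 widens each successive bin by 1%.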
def coherence(self):
"""
Compute Coherence function of the cross spectrum. Coherence is a
Fourier frequency dependent measure of the linear correlation
between time series measured simultaneously in two energy channels.
Returns
-------
coh : numpy.ndarray
Coherence function
References
----------
.. [1] http://iopscience.iop.org/article/10.1086/310430/pdf
"""
# this computes the averaged power spectrum, but using the
# cross spectrum code to avoid circular imports
ps1 = Crossspectrum(self.lc1, self.lc1)
ps2 = Crossspectrum(self.lc2, self.lc2)
return self.unnorm_power/(ps1.unnorm_power * ps2.unnorm_power)
def _phase_lag(self):
"""Return the fourier phase lag of the cross spectrum."""
return np.angle(self.power)
def time_lag(self):
"""
Calculate the fourier time lag of the cross spectrum. The time lag is
calculate using the center of the frequency bins.
"""
if self.__class__ in [Crossspectrum, AveragedCrossspectrum]:
ph_lag = self._phase_lag()
return ph_lag / (2 * np.pi * self.freq)
else:
raise AttributeError("Object has no attribute named 'time_lag' !")
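# Example (added sketch; the sign convention depends on which light curve
# is conjugated in _fourier_cross):
#
#   dt, tau = 1 / 128, 0.05
#   t = np.arange(0, 16, dt)
#   ref = 100 + 30 * np.sin(2 * np.pi * 4 * t)
#   lag = 100 + 30 * np.sin(2 * np.pi * 4 * (t - tau))
#   cs = Crossspectrum(lightcurve.Lightcurve(t, lag),
#                      lightcurve.Lightcurve(t, ref))
#   # cs.time_lag() should be close to +/- tau in the bins near f = 4 Hz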
class AveragedCrossspectrum(Crossspectrum):
def __init__(self, lc1=None, lc2=None, segment_size=None,
norm='none', gti=None):
"""
Make an averaged cross spectrum from a light curve by segmenting two
light curves, Fourier-transforming each segment and then averaging the
resulting cross spectra.
Parameters
----------
lc1: lightcurve.Lightcurve object OR
iterable of lightcurve.Lightcurve objects
One light curve data to be Fourier-transformed. This is the band
of interest or channel of interest.
lc2: lightcurve.Lightcurve object OR
iterable of lightcurve.Lightcurve objects
Second light curve data to be Fourier-transformed. This is the
reference band.
segment_size: float
The size of each segment to average. Note that if the total
duration of each Lightcurve object in lc1 or lc2 is not an
integer multiple of the segment_size, then any fraction left-over
at the end of the time series will be lost. Otherwise you introduce
artefacts.
norm: {'frac', 'abs', 'leahy', 'none'}, default 'none'
The normalization of the (real part of the) cross spectrum.
Other Parameters
----------------
gti: 2-d float array
[[gti0_0, gti0_1], [gti1_0, gti1_1], ...] -- Good Time intervals.
This choice overrides the GTIs in the single light curves. Use with
care!
Attributes
----------
freq: numpy.ndarray
The array of mid-bin frequencies that the Fourier transform samples
power: numpy.ndarray
The array of cross spectra
df: float
The frequency resolution
m: int
The number of averaged cross spectra
n: int
The number of time bins per segment of light curve.
nphots1: float
The total number of photons in the first (interest) light curve
nphots2: float
The total number of photons in the second (reference) light curve
gti: 2-d float array
[[gti0_0, gti0_1], [gti1_0, gti1_1], ...] -- Good Time intervals.
They are calculated by taking the common GTI between the
two light curves
"""
self.type = "crossspectrum"
if segment_size is not None:
if not np.isfinite(segment_size):
raise ValueError("segment_size must be finite")
self.segment_size = segment_size
Crossspectrum.__init__(self, lc1, lc2, norm, gti=gti)
return
def _make_segment_spectrum(self, lc1, lc2, segment_size):
# TODO: need to update this for making cross spectra.
assert isinstance(lc1, lightcurve.Lightcurve)
assert isinstance(lc2, lightcurve.Lightcurve)
if lc1.dt != lc2.dt:
raise ValueError("Light curves do not have same time binning dt.")
if lc1.tseg != lc2.tseg:
raise ValueError("Lightcurves do not have same tseg.")
if self.gti is None:
self.gti = cross_two_gtis(lc1.gti, lc2.gti)
check_gtis(self.gti)
cs_all = []
nphots1_all = []
nphots2_all = []
start_inds, end_inds = \
bin_intervals_from_gtis(self.gti, segment_size, lc1.time)
for start_ind, end_ind in zip(start_inds, end_inds):
time_1 = lc1.time[start_ind:end_ind]
counts_1 = lc1.counts[start_ind:end_ind]
time_2 = lc2.time[start_ind:end_ind]
counts_2 = lc2.counts[start_ind:end_ind]
lc1_seg = lightcurve.Lightcurve(time_1, counts_1)
lc2_seg = lightcurve.Lightcurve(time_2, counts_2)
cs_seg = Crossspectrum(lc1_seg, lc2_seg, norm=self.norm)
cs_all.append(cs_seg)
nphots1_all.append(np.sum(lc1_seg.counts))
nphots2_all.append(np.sum(lc2_seg.counts))
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Conditional Random Field layer."""
import itertools
import os
import math
import tempfile
import pytest
import numpy as np
import tensorflow as tf
from tensorflow_addons.layers.crf import CRF
from tensorflow_addons.text.crf import crf_log_likelihood
from tensorflow_addons.utils import test_utils
def get_test_data():
x = np.array(
[
[
# O B-X I-X B-Y I-Y
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0, 0.0],
],
[
# O B-X I-X B-Y I-Y
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.0, 0.0],
],
]
)
y = np.array([[1, 2, 2], [1, 1, 1]]) # B-X I-X I-X # B-X B-X B-X
return x, y
def get_test_data_extended():
logits = np.array(
[
[[0, 0, 0.5, 0.5, 0.2], [0, 0, 0.3, 0.3, 0.1], [0, 0, 0.9, 10, 1]],
[[0, 0, 0.2, 0.5, 0.2], [0, 0, 3, 0.3, 0.1], [0, 0, 0.9, 1, 1]],
]
)
tags = np.array([[2, 3, 4], [3, 2, 2]])
transitions = np.array(
[
[0.1, 0.2, 0.3, 0.4, 0.5],
[0.8, 0.3, 0.1, 0.7, 0.9],
[-0.3, 2.1, -5.6, 3.4, 4.0],
[0.2, 0.4, 0.6, -0.3, -0.4],
[1.0, 1.0, 1.0, 1.0, 1.0],
]
)
boundary_values = np.ones((5,))
crf_layer = CRF(
units=5,
use_kernel=False, # disable kernel transform
chain_initializer=tf.keras.initializers.Constant(transitions),
use_boundary=True,
boundary_initializer=tf.keras.initializers.Constant(boundary_values),
name="crf_layer",
)
return logits, tags, transitions, boundary_values, crf_layer
@pytest.mark.usefixtures("run_with_mixed_precision_policy")
def test_keras_model_inference():
logits, _, _, _, crf_layer = get_test_data_extended()
input_tensor = tf.keras.layers.Input(shape=(3, 5))
decoded_sequence, _, _, _ = crf_layer(input_tensor)
model = tf.keras.Model(input_tensor, decoded_sequence)
model.predict(logits)
model(logits).numpy()
@pytest.mark.usefixtures("run_with_mixed_precision_policy")
def test_unmasked_viterbi_decode():
x_np, y_np = get_test_data()
transitions = np.ones([5, 5])
boundary_value = np.ones(5)
layer = CRF(
units=5,
use_kernel=False, # disable kernel transform
chain_initializer=tf.keras.initializers.Constant(transitions),
use_boundary=True,
boundary_initializer=tf.keras.initializers.Constant(boundary_value),
)
decoded_sequence, _, _, _ = layer(x_np)
decoded_sequence = decoded_sequence.numpy()
np.testing.assert_equal(decoded_sequence, y_np)
assert decoded_sequence.dtype == np.int32
def unpack_data(data):
if len(data) == 2:
return data[0], data[1], None
elif len(data) == 3:
return data
else:
raise TypeError("Expected data to be a tuple of size 2 or 3.")
class ModelWithCRFLoss(tf.keras.Model):
"""Wrapper around the base model for custom training logic."""
def __init__(self, base_model):
super().__init__()
self.base_model = base_model
def call(self, inputs):
return self.base_model(inputs)
def compute_loss(self, x, y, sample_weight, training=False):
y_pred = self(x, training=training)
_, potentials, sequence_length, chain_kernel = y_pred
# we now add the CRF loss:
crf_loss = -crf_log_likelihood(potentials, y, sequence_length, chain_kernel)[0]
if sample_weight is not None:
crf_loss = crf_loss * sample_weight
return tf.reduce_mean(crf_loss), sum(self.losses)
def train_step(self, data):
x, y, sample_weight = unpack_data(data)
with tf.GradientTape() as tape:
crf_loss, internal_losses = self.compute_loss(
x, y, sample_weight, training=True
)
total_loss = crf_loss + internal_losses
gradients = tape.gradient(total_loss, self.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))
return {"crf_loss": crf_loss, "internal_losses": internal_losses}
def test_step(self, data):
x, y, sample_weight = unpack_data(data)
crf_loss, internal_losses = self.compute_loss(x, y, sample_weight)
return {"crf_loss_val": crf_loss, "internal_losses_val": internal_losses}
@pytest.mark.usefixtures("run_with_mixed_precision_policy")
def test_training():
x_np, y_np = get_test_data()
get_some_model(x_np, y_np)
def get_some_model(x_np, y_np, sanity_check=True):
x_input = tf.keras.layers.Input(shape=x_np.shape[1:])
crf_outputs = CRF(5, name="L")(x_input)
base_model = tf.keras.Model(x_input, crf_outputs)
model = ModelWithCRFLoss(base_model)
model.compile("adam")
if sanity_check:
model.fit(x=x_np, y=y_np)
model.evaluate(x_np, y_np)
model.predict(x_np)
return model
@pytest.mark.usefixtures("run_with_mixed_precision_policy")
def test_mask_right_padding():
x_np, y_np = get_test_data()
mask = np.array([[1, 1, 1], [1, 1, 0]])
x = tf.keras.layers.Input(shape=x_np.shape[1:])
crf_layer_outputs = CRF(5)(x, mask=tf.constant(mask))
base_model = tf.keras.Model(x, crf_layer_outputs)
model = ModelWithCRFLoss(base_model)
# check shape inference
model.compile("adam")
old_weights = model.get_weights()
model.fit(x_np, y_np)
new_weights = model.get_weights()
# we check that the weights were updated during the training phase.
with pytest.raises(AssertionError):
assert_all_equal(old_weights, new_weights)
model.predict(x_np)
@pytest.mark.usefixtures("run_with_mixed_precision_policy")
def test_mask_left_padding():
x_np, y_np = get_test_data()
mask = np.array([[0, 1, 1], [1, 1, 1]])
from tkinter import *
from tkinter import filedialog
from PIL import Image, ImageTk
from tkinter.ttk import Frame, Style
import os, time, cv2
import numpy as np
from scipy.ndimage.morphology import distance_transform_edt
import tkinter.messagebox as mbox
from our_func_cvpr18 import cly_our_func, our_func, build
from cly_instance_property import miniCOCO, SBD
from tqdm import tqdm
import tensorflow as tf
def get_next_anno_point(pred, gt, seq_points):
fndist_map = distance_transform_edt(np.pad((gt == 1) & (pred == 0), ((1, 1), (1, 1)), 'constant'))[1:-1, 1:-1]
fpdist_map = distance_transform_edt(np.pad((gt == 0) & (pred == 1), ((1, 1), (1, 1)), 'constant'))[1:-1, 1:-1]
fndist_map[seq_points[:, 1], seq_points[:, 0]], fpdist_map[seq_points[:, 1], seq_points[:, 0]] = 0, 0
[usr_map, if_pos] = [fndist_map, 1] if fndist_map.max() > fpdist_map.max() else [fpdist_map, 0]
[y_mlist, x_mlist] = np.where(usr_map == usr_map.max())
pt_next = (x_mlist[0], y_mlist[0], if_pos)
return pt_next
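# Usage sketch (added; `gt` is assumed to be a binary numpy mask): the next
# simulated click lands at the pixel deepest inside the larger error region,
# positive for false negatives and negative for false positives:
#
#   pred = np.zeros_like(gt)
#   seq_points = np.empty((0, 3), dtype=np.int64)   # columns: x, y, if_pos
#   x, y, if_pos = get_next_anno_point(pred, gt, seq_points)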
def get_next_anno_point_prob(pred, gt, seq_points):
fndist_map = distance_transform_edt(np.pad((gt == 1) & (pred == 0), ((1, 1), (1, 1)), 'constant'))[1:-1, 1:-1]
import argparse
import csv
import matplotlib
import matplotlib.ticker as tck
import matplotlib.pyplot as plt
import numpy as np
# Matplotlib export settings
matplotlib.use('pgf')
import matplotlib.pyplot as plt
matplotlib.rcParams.update({
'pgf.texsystem': 'pdflatex',
'font.size': 10,
'font.family': 'serif', # use serif/main font for text elements
'text.usetex': True, # use inline math for ticks
'pgf.rcfonts': False # don't setup fonts from rc parameters
})
# Main function
def main(args):
C_zero = 7.5240e-03 * 1e-6 # Farads/km
C_pos = 1.2027e-02 * 1e-6 # Farads/km
G_zero = 2.0000e-08 # Mhos/km
G_pos = 2.0000e-08 # Mhos/km
length = 300 # km
FREQ_INDEX = 0
R_ZERO_INDEX = 1
L_ZERO_INDEX = 2
R_POS_INDEX = 3
L_POS_INDEX = 4
MAGNITUDE_INDEX = 0
PHASE_INDEX = 1
# prepopulate data with a list of five empty lists
data = [[] for i in range(5)]
# Read in PSCAD .CSV data
print('*** Opening assignment 4 CSV data file...')
with open('data_assign04.csv') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
# Read in row data
for row in csv_reader:
if line_count == 0:
print('Column names are: ' + ', '.join(row))
line_count += 1
else:
data[FREQ_INDEX].append(float(row[0]))
data[R_ZERO_INDEX].append(float(row[1])) # Ohms/km
data[L_ZERO_INDEX].append(float(row[2]) * 1e-3) # Henries/km
data[R_POS_INDEX].append(float(row[3])) # Ohms/km
data[L_POS_INDEX].append(float(row[4]) * 1e-3) # Henries/km
line_count += 1
# Figure out when break switched
print('Processed ' + str(line_count) + ' lines.')
num_data_points = len(data[FREQ_INDEX])
# Prepare values for Z(w) magnitude and phase
impedance_zero = [[],[]]
impedance_pos = [[],[]]
for index in range(num_data_points):
omega = 2*np.pi*data[FREQ_INDEX][index]
impedance_zero_val = np.sqrt((data[R_ZERO_INDEX][index] + (1j*omega*data[L_ZERO_INDEX][index]))/(G_zero + (1j*omega*C_zero)))
impedance_pos_val = np.sqrt((data[R_POS_INDEX][index] + (1j*omega*data[L_POS_INDEX][index])) /(G_pos + (1j*omega*C_pos)))
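# Added note: this is the characteristic impedance of a transmission line,
# Zc(w) = sqrt((R + jwL) / (G + jwC)), evaluated per sequence network from
# the per-km line constants; the per-km units cancel in the ratio.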
# print("F: " + str(data[FREQ_INDEX][index]))
# print("Omega: " + str(omega))
# print("R_0: " + str(data[R_ZERO_INDEX][index]))
# print("L_0: " + str(data[L_ZERO_INDEX][index]))
# print("C_0: " + str(C_zero))
# print("G_0: " + str(G_zero))
# print("R_+: " + str(data[R_POS_INDEX][index]))
# print("L_+: " + str(data[L_POS_INDEX][index]))
# print("C_+: " + str(C_pos))
# print("G_+: " + str(G_pos))
# print("Zc_0: " + str(impedance_zero_val))
# print("Zc_0 mag: " + str(np.absolute(impedance_zero_val)))
# print("Zc_+: " + str(impedance_pos_val))
# print("Zc_+ mag: " + str(np.absolute(impedance_pos_val)))
impedance_zero[MAGNITUDE_INDEX].append(np.absolute(impedance_zero_val))
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from SpectrogramTools import *
from CQT import *
from NMF import *
from NMFGPU import *
from NMFJoint import *
from SongAnalogies import *
def testNMFJointSynthetic():
np.random.seed(100)
N = 20
M = 100
K = 6
H = np.random.rand(K, M)
W1 = np.random.rand(N, K)
W2 = np.random.rand(N, K)
X1 = W1.dot(H)
X2 = W2.dot(H)
lambdas = [0.01]*2
plotfn = lambda Xs, Us, Vs, VStar, errs: \
plotJointNMFwGT(Xs, Us, Vs, VStar, [W1, W2], [H.T, H.T], errs)
res = doJointNMF([X1, X2], lambdas, K, tol = 0.01, Verbose = True, plotfn = plotfn)
res['X1'] = X1
res['X2'] = X2
sio.savemat("JointNMF.mat", res)
def testNMF1DConvSynthetic():
np.random.seed(100)
N = 20
M = 40
K = 3
L = 80
T = 10
V = 0*np.ones((N, M))
V[5+np.arange(T), np.arange(T)] = 1
V[5+np.arange(T), 5+np.arange(T)] = 0.5
V[15-np.arange(T), 10+np.arange(T)] = 1
V[5+np.arange(T), 20+np.arange(T)] = 1
V[15-np.arange(T), 22+np.arange(T)] = 0.5
V[5+np.arange(T), 10+np.arange(T)] += 0.7
V *= 1000
#doNMF(V, K*T, L, plotfn=plotNMFSpectra)
doNMF1DConv(V, K, T+5, L, plotfn=plotNMF1DConvSpectra)
def testNMF2DConvSynthetic():
initParallelAlgorithms()
np.random.seed(300)
N = 20
M = 40
K = 2
L = 200
T = 10
F = 5
V = 0.1*np.ones((N, M))
V[5+np.arange(T), np.arange(T)] = 1
V[8+np.arange(T), 5+np.arange(T)] = 0.5
V[15-np.arange(T), 10+np.arange(T)] = 1
V[6+np.arange(T), 20+np.arange(T)] = 1
V[10-np.arange(T), 22+np.arange(T)] = 0.5
V[10+np.arange(T), 10+np.arange(T)] += 0.7
doNMF2DConv(V, K, T, F, L, doKL = True, plotfn=plotNMF2DConvSpectra)
#doNMF1DConv(V, K, T, L, plotfn=plotNMF1DConvSpectra)
def get2DSyntheticJointExample():
T = 10
F = 10
K = 3
M = 20
N = 60
W1 = np.zeros((T, M, K))
W2 = np.zeros((T, M, K))
#Pattern 1: A tall block in A that goes to a fat block in A'
[J, I] = np.meshgrid(np.arange(2), 4+np.arange(5))
W1[J.flatten(), I.flatten(), 0] = 1
[J, I] = np.meshgrid(np.arange(5), 7+np.arange(2))
W2[J.flatten(), I.flatten(), 0] = 1
#Pattern 2: An antidiagonal line in A that goes to a diagonal line in A'
W1[np.arange(7), 9-np.arange(7), 1] = 1
W2[np.arange(7), np.arange(7), 1] = 1
#Pattern 3: A square in A that goes into a circle in A'
[J, I] = np.meshgrid(np.arange(5), 10+np.arange(5))
I = I.flatten()
J = J.flatten()
W1[0, np.arange(10), 2] = 1
W1[9, np.arange(10), 2] = 1
W1[np.arange(10), 0, 2] = 1
W1[np.arange(10), 10, 2] = 1
[J, I] = np.meshgrid(np.arange(T), np.arange(T))
I = I.flatten()
J = J.flatten()
idx = np.arange(I.size)
idx = idx[np.abs((I-5)**2 + (J-5)**2 - 4**2) < 4]  # NOTE: the comparison was lost in extraction; "< 4" is an assumed threshold keeping points near the radius-4 circle
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import matplotlib.pyplot as plt
import matplotlib.widgets as wg
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from mayavi import mlab
import solutions
png_size = (1024, 768)
def exp_plot():
x = np.linspace(-2, 3, 501)
y = np.exp(x)
plt.plot(x, y)
plt.savefig("exp_plot.pdf")
plt.clf()
def statemachine():
x = np.linspace(1, 10, 10)
y = np.random.rand(10, 10)
plt.cla()
for n in y:
plt.plot(x, n)
plt.savefig("statemachine.pdf")
plt.clf()
def subplots():
x = np.linspace(-np.pi, np.pi, 400)
y1 = np.sin(x)
y2 = np.cos(x)
plt.subplot(211)
plt.plot(x, y1)
plt.subplot(212)
plt.plot(x, y2)
plt.savefig("subplots.pdf")
plt.clf()
def sinxsiny():
n = 401
x = np.linspace(-6, 6, n)
y = np.linspace(-6, 6, n)
X, Y = np.meshgrid(x, y) # returns a coordinate matrix given coordinate vectors.
C = np.sin(X) * np.sin(Y)
plt.pcolormesh(X, Y, C, edgecolors='face', shading='flat')
plt.savefig("sinxsiny.png", size=png_size)
plt.clf()
def pcolor2():
R = np.linspace(0, 2, 401)
I = R.copy()
R, I = np.meshgrid(R, I)
X = R + complex(0,1)*I
f = np.poly1d([1, 2, -1, 3])
Y = np.absolute(f(X))
plt.pcolormesh(R, I, Y, edgecolors='face', shading='flat')
plt.savefig('pcolor2.png', size=png_size)
plt.clf()
def three_d_plot():
fig = plt.figure()
ax = fig.gca(projection='3d')
x = np.linspace(-6, 6, 301)
y = x.copy()
X, Y = np.meshgrid(x, y)
Z = np.sin(X)*np.sin(Y)
ax.plot_surface(X, Y, Z)
plt.savefig("3dplot.pdf")
plt.clf()
def interact():
ax = plt.subplot(111)
plt.subplots_adjust(bottom=.25)
t = np.arange(0, 1, .001)
a0, f0 = 5, 3
s = a0*np.sin(2*np.pi*f0*t)
l = plt.plot(t, s)[0]
plt.axis([0, 1, -10, 10])
axfreq = plt.axes([.25, .05, .65, .03])
axamp = plt.axes([.25, .1, .65, .03])
sfreq = wg.Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = wg.Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
def update(val):
amp = samp.val
freq = sfreq.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
plt.draw()
sfreq.on_changed(update)
samp.on_changed(update)
plt.savefig("interact.pdf")
plt.clf()
def plot3d():
num = np.pi/1000
pts = np.arange(0, 2*np.pi + num, num)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# tifffile.py
# Copyright (c) 2008-2014, <NAME>
# Copyright (c) 2008-2014, The Regents of the University of California
# Produced at the Laboratory for Fluorescence Dynamics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Read and write image data from and to TIFF files.
Image and meta-data can be read from TIFF, BigTIFF, OME-TIFF, STK, LSM, NIH,
ImageJ, MicroManager, FluoView, SEQ and GEL files.
Only a subset of the TIFF specification is supported, mainly uncompressed
and losslessly compressed 2**(0 to 6) bit integer, 16, 32 and 64-bit float,
grayscale and RGB(A) images, which are commonly used in bio-scientific imaging.
Specifically, reading JPEG/CCITT compressed image data or EXIF/IPTC/GPS/XMP
meta-data is not implemented. Only primary info records are read for STK,
FluoView, MicroManager, and NIH image formats.
TIFF, the Tagged Image File Format, is under the control of Adobe Systems.
BigTIFF allows for files greater than 4 GB. STK, LSM, FluoView, SEQ, GEL,
and OME-TIFF, are custom extensions defined by MetaMorph, Carl Zeiss
MicroImaging, Olympus, Media Cybernetics, Molecular Dynamics, and the Open
Microscopy Environment consortium respectively.
For command line usage run ``python tifffile.py --help``
:Author:
`<NAME> <http://www.lfd.uci.edu/~gohlke/>`_
:Organization:
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 2014.02.05
Requirements
------------
* `CPython 2.7 or 3.3 <http://www.python.org>`_
* `Numpy 1.7 <http://www.numpy.org>`_
* `Matplotlib 1.3 <http://www.matplotlib.org>`_ (optional for plotting)
* `Tifffile.c 2013.01.18 <http://www.lfd.uci.edu/~gohlke/>`_
(recommended for faster decoding of PackBits and LZW encoded strings)
Notes
-----
The API is not stable yet and might change between revisions.
Tested on little-endian platforms only.
Other Python packages and modules for reading bio-scientific TIFF files:
* `Imread <http://luispedro.org/software/imread>`_
* `PyLibTiff <http://code.google.com/p/pylibtiff>`_
* `SimpleITK <http://www.simpleitk.org>`_
* `PyLSM <https://launchpad.net/pylsm>`_
* `PyMca.TiffIO.py <http://pymca.sourceforge.net/>`_
* `BioImageXD.Readers <http://www.bioimagexd.net/>`_
* `Cellcognition.io <http://cellcognition.org/>`_
* `CellProfiler.bioformats <http://www.cellprofiler.org/>`_
Acknowledgements
----------------
* <NAME>, University of Manchester, for cz_lsm_scan_info specifics.
* <NAME> for a bug fix and some read_cz_lsm functions.
* <NAME> for help on reading MicroManager files.
References
----------
(1) TIFF 6.0 Specification and Supplements. Adobe Systems Incorporated.
http://partners.adobe.com/public/developer/tiff/
(2) TIFF File Format FAQ. http://www.awaresystems.be/imaging/tiff/faq.html
(3) MetaMorph Stack (STK) Image File Format.
http://support.meta.moleculardevices.com/docs/t10243.pdf
(4) File Format Description - LSM 5xx Release 2.0.
http://ibb.gsf.de/homepage/karsten.rodenacker/IDL/Lsmfile.doc
(5) BioFormats. http://www.loci.wisc.edu/ome/formats.html
(6) The OME-TIFF format.
http://www.openmicroscopy.org/site/support/file-formats/ome-tiff
(7) TiffDecoder.java
http://rsbweb.nih.gov/ij/developer/source/ij/io/TiffDecoder.java.html
(8) UltraQuant(r) Version 6.0 for Windows Start-Up Guide.
http://www.ultralum.com/images%20ultralum/pdf/UQStart%20Up%20Guide.pdf
(9) Micro-Manager File Formats.
http://www.micro-manager.org/wiki/Micro-Manager_File_Formats
Examples
--------
>>> data = numpy.random.rand(301, 219)
>>> imsave('temp.tif', data)
>>> image = imread('temp.tif')
>>> assert numpy.all(image == data)
>>> tif = TiffFile('test.tif')
>>> images = tif.asarray()
>>> image0 = tif[0].asarray()
>>> for page in tif:
... for tag in page.tags.values():
... t = tag.name, tag.value
... image = page.asarray()
... if page.is_rgb: pass
... if page.is_palette:
... t = page.color_map
... if page.is_stk:
... t = page.mm_uic_tags.number_planes
... if page.is_lsm:
... t = page.cz_lsm_info
>>> tif.close()
"""
from __future__ import division, print_function
import sys
import os
import re
import glob
import math
import zlib
import time
import json
import struct
import warnings
import datetime
import collections
from fractions import Fraction
from xml.etree import cElementTree as ElementTree
import numpy
__version__ = '2014.02.05'
__docformat__ = 'restructuredtext en'
__all__ = ['imsave', 'imread', 'imshow', 'TiffFile', 'TiffSequence']
def imsave(filename, data, photometric=None, planarconfig=None,
resolution=None, description=None, software='tifffile.py',
byteorder=None, bigtiff=False, compress=0, extratags=()):
"""Write image data to TIFF file.
Image data are written in one stripe per plane.
Dimensions larger than 2 or 3 (depending on photometric mode and
planar configuration) are flattened and saved as separate pages.
The 'sample_format' and 'bits_per_sample' TIFF tags are derived from
the data type.
Parameters
----------
filename : str
Name of file to write.
data : array_like
Input image. The last dimensions are assumed to be image height,
width, and samples.
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
        Specifies whether samples are stored contiguously or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
software : str
Name of the software used to create the image.
Saved with the first page only.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
bigtiff : bool
If True, the BigTIFF format is used.
By default the standard TIFF format is used for data less than 2000 MB.
compress : int
Values from 0 to 9 controlling the level of zlib compression.
If 0, data are written uncompressed (default).
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in `value` in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
`Count` values compatible with `dtype`.
writeonce : bool
If True, the tag is written to the first page only.
Examples
--------
>>> data = numpy.ones((2, 5, 3, 301, 219), 'float32') * 0.5
>>> imsave('temp.tif', data, compress=6)
>>> data = numpy.ones((5, 301, 219, 3), 'uint8') + 127
>>> value = u'{"shape": %s}' % str(list(data.shape))
>>> imsave('temp.tif', data, extratags=[(270, 's', 0, value, True)])
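    A sketch of also attaching resolution metadata (the 300 dpi value is an
    assumed example, not a requirement):
    >>> imsave('temp.tif', data, photometric='rgb', resolution=(300, 300))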
"""
assert(photometric in (None, 'minisblack', 'miniswhite', 'rgb'))
assert(planarconfig in (None, 'contig', 'planar'))
assert(byteorder in (None, '<', '>'))
assert(0 <= compress <= 9)
if byteorder is None:
byteorder = '<' if sys.byteorder == 'little' else '>'
data = numpy.asarray(data, dtype=byteorder+data.dtype.char, order='C')
data_shape = shape = data.shape
data = numpy.atleast_2d(data)
if not bigtiff and data.size * data.dtype.itemsize < 2000*2**20:
bigtiff = False
offset_size = 4
tag_size = 12
numtag_format = 'H'
offset_format = 'I'
val_format = '4s'
else:
bigtiff = True
offset_size = 8
tag_size = 20
numtag_format = 'Q'
offset_format = 'Q'
val_format = '8s'
# unify shape of data
samplesperpixel = 1
extrasamples = 0
if photometric is None:
if data.ndim > 2 and (shape[-3] in (3, 4) or shape[-1] in (3, 4)):
photometric = 'rgb'
else:
photometric = 'minisblack'
if photometric == 'rgb':
if len(shape) < 3:
raise ValueError("not a RGB(A) image")
if planarconfig is None:
planarconfig = 'planar' if shape[-3] in (3, 4) else 'contig'
if planarconfig == 'contig':
if shape[-1] not in (3, 4):
raise ValueError("not a contiguous RGB(A) image")
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
if shape[-3] not in (3, 4):
raise ValueError("not a planar RGB(A) image")
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
if samplesperpixel == 4:
extrasamples = 1
elif planarconfig and len(shape) > 2:
if planarconfig == 'contig':
data = data.reshape((-1, 1) + shape[-3:])
samplesperpixel = shape[-1]
else:
data = data.reshape((-1, ) + shape[-3:] + (1, ))
samplesperpixel = shape[-3]
extrasamples = samplesperpixel - 1
else:
planarconfig = None
# remove trailing 1s
while len(shape) > 2 and shape[-1] == 1:
shape = shape[:-1]
data = data.reshape((-1, 1) + shape[-2:] + (1, ))
shape = data.shape # (pages, planes, height, width, contig samples)
bytestr = bytes if sys.version[0] == '2' else (
lambda x: bytes(x, 'utf-8') if isinstance(x, str) else x)
tifftypes = {'B': 1, 's': 2, 'H': 3, 'I': 4, '2I': 5, 'b': 6,
'h': 8, 'i': 9, 'f': 11, 'd': 12, 'Q': 16, 'q': 17}
tifftags = {
'new_subfile_type': 254, 'subfile_type': 255,
'image_width': 256, 'image_length': 257, 'bits_per_sample': 258,
'compression': 259, 'photometric': 262, 'fill_order': 266,
'document_name': 269, 'image_description': 270, 'strip_offsets': 273,
'orientation': 274, 'samples_per_pixel': 277, 'rows_per_strip': 278,
'strip_byte_counts': 279, 'x_resolution': 282, 'y_resolution': 283,
'planar_configuration': 284, 'page_name': 285, 'resolution_unit': 296,
'software': 305, 'datetime': 306, 'predictor': 317, 'color_map': 320,
'extra_samples': 338, 'sample_format': 339}
tags = [] # list of (code, ifdentry, ifdvalue, writeonce)
def pack(fmt, *val):
return struct.pack(byteorder+fmt, *val)
def addtag(code, dtype, count, value, writeonce=False):
# compute ifdentry and ifdvalue bytes from code, dtype, count, value
# append (code, ifdentry, ifdvalue, writeonce) to tags list
code = tifftags[code] if code in tifftags else int(code)
if dtype not in tifftypes:
raise ValueError("unknown dtype %s" % dtype)
tifftype = tifftypes[dtype]
rawcount = count
if dtype == 's':
value = bytestr(value) + b'\0'
count = rawcount = len(value)
value = (value, )
if len(dtype) > 1:
count *= int(dtype[:-1])
dtype = dtype[-1]
ifdentry = [pack('HH', code, tifftype),
pack(offset_format, rawcount)]
ifdvalue = None
if count == 1:
if isinstance(value, (tuple, list)):
value = value[0]
ifdentry.append(pack(val_format, pack(dtype, value)))
elif struct.calcsize(dtype) * count <= offset_size:
ifdentry.append(pack(val_format, pack(str(count)+dtype, *value)))
else:
ifdentry.append(pack(offset_format, 0))
ifdvalue = pack(str(count)+dtype, *value)
tags.append((code, b''.join(ifdentry), ifdvalue, writeonce))
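    # For reference, each IFD entry packed by addtag follows the TIFF layout:
    # classic TIFF uses tag (2 bytes) + type (2) + count (4) + value/offset
    # (4) = 12 bytes (tag_size above); BigTIFF widens count and value/offset
    # to 8 bytes each, giving 20 bytes. Values too large for the value/offset
    # field are written separately and the offset is patched in afterwards.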
def rational(arg, max_denominator=1000000):
        # return numerator and denominator from a float or two integers
try:
f = Fraction.from_float(arg)
except TypeError:
f = Fraction(arg[0], arg[1])
f = f.limit_denominator(max_denominator)
return f.numerator, f.denominator
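    # Worked example: rational(2.25) returns (9, 4) and rational((300, 1))
    # returns (300, 1); both forms are stored as TIFF RATIONAL values below.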
if software:
addtag('software', 's', 0, software, writeonce=True)
if description:
addtag('image_description', 's', 0, description, writeonce=True)
elif shape != data_shape:
addtag('image_description', 's', 0,
"shape=(%s)" % (",".join('%i' % i for i in data_shape)),
writeonce=True)
addtag('datetime', 's', 0,
datetime.datetime.now().strftime("%Y:%m:%d %H:%M:%S"),
writeonce=True)
addtag('compression', 'H', 1, 32946 if compress else 1)
addtag('orientation', 'H', 1, 1)
addtag('image_width', 'I', 1, shape[-2])
addtag('image_length', 'I', 1, shape[-3])
addtag('new_subfile_type', 'I', 1, 0 if shape[0] == 1 else 2)
addtag('sample_format', 'H', 1,
{'u': 1, 'i': 2, 'f': 3, 'c': 6}[data.dtype.kind])
addtag('photometric', 'H', 1,
{'miniswhite': 0, 'minisblack': 1, 'rgb': 2}[photometric])
addtag('samples_per_pixel', 'H', 1, samplesperpixel)
if planarconfig:
addtag('planar_configuration', 'H', 1, 1 if planarconfig=='contig'
else 2)
addtag('bits_per_sample', 'H', samplesperpixel,
(data.dtype.itemsize * 8, ) * samplesperpixel)
else:
addtag('bits_per_sample', 'H', 1, data.dtype.itemsize * 8)
if extrasamples:
if photometric == 'rgb':
addtag('extra_samples', 'H', 1, 1) # alpha channel
else:
addtag('extra_samples', 'H', extrasamples, (0, ) * extrasamples)
if resolution:
addtag('x_resolution', '2I', 1, rational(resolution[0]))
addtag('y_resolution', '2I', 1, rational(resolution[1]))
addtag('resolution_unit', 'H', 1, 2)
addtag('rows_per_strip', 'I', 1, shape[-3])
# use one strip per plane
strip_byte_counts = (data[0, 0].size * data.dtype.itemsize, ) * shape[1]
addtag('strip_byte_counts', offset_format, shape[1], strip_byte_counts)
addtag('strip_offsets', offset_format, shape[1], (0, ) * shape[1])
# add extra tags from users
for t in extratags:
addtag(*t)
# the entries in an IFD must be sorted in ascending order by tag code
tags = sorted(tags, key=lambda x: x[0])
with open(filename, 'wb') as fh:
seek = fh.seek
tell = fh.tell
def write(arg, *args):
fh.write(pack(arg, *args) if args else arg)
write({'<': b'II', '>': b'MM'}[byteorder])
if bigtiff:
write('HHH', 43, 8, 0)
else:
write('H', 42)
ifd_offset = tell()
write(offset_format, 0) # first IFD
for pageindex in range(shape[0]):
# update pointer at ifd_offset
pos = tell()
seek(ifd_offset)
write(offset_format, pos)
seek(pos)
# write ifdentries
write(numtag_format, len(tags))
tag_offset = tell()
write(b''.join(t[1] for t in tags))
ifd_offset = tell()
write(offset_format, 0) # offset to next IFD
# write tag values and patch offsets in ifdentries, if necessary
for tagindex, tag in enumerate(tags):
if tag[2]:
pos = tell()
seek(tag_offset + tagindex*tag_size + offset_size + 4)
write(offset_format, pos)
seek(pos)
if tag[0] == 273:
strip_offsets_offset = pos
elif tag[0] == 279:
strip_byte_counts_offset = pos
write(tag[2])
# write image data
data_offset = tell()
if compress:
strip_byte_counts = []
for plane in data[pageindex]:
plane = zlib.compress(plane, compress)
strip_byte_counts.append(len(plane))
fh.write(plane)
else:
                # if this fails, try updating Python/numpy
data[pageindex].tofile(fh)
fh.flush()
# update strip_offsets and strip_byte_counts if necessary
pos = tell()
for tagindex, tag in enumerate(tags):
if tag[0] == 273: # strip_offsets
if tag[2]:
seek(strip_offsets_offset)
strip_offset = data_offset
for size in strip_byte_counts:
write(offset_format, strip_offset)
strip_offset += size
else:
seek(tag_offset + tagindex*tag_size + offset_size + 4)
write(offset_format, data_offset)
elif tag[0] == 279: # strip_byte_counts
if compress:
if tag[2]:
seek(strip_byte_counts_offset)
for size in strip_byte_counts:
write(offset_format, size)
else:
seek(tag_offset + tagindex*tag_size +
offset_size + 4)
write(offset_format, strip_byte_counts[0])
break
seek(pos)
fh.flush()
# remove tags that should be written only once
if pageindex == 0:
tags = [t for t in tags if not t[-1]]
def imread(files, *args, **kwargs):
"""Return image data from TIFF file(s) as numpy array.
The first image series is returned if no arguments are provided.
Parameters
----------
files : str or list
File name, glob pattern, or list of file names.
key : int, slice, or sequence of page indices
Defines which pages to return as array.
series : int
Defines which series of pages in file to return as array.
multifile : bool
If True (default), OME-TIFF data may include pages from multiple files.
pattern : str
Regular expression pattern that matches axes names and indices in
file names.
Examples
--------
>>> im = imread('test.tif', 0)
>>> im.shape
(256, 256, 4)
>>> ims = imread(['test.tif', 'test.tif'])
>>> ims.shape
(2, 256, 256, 4)
"""
kwargs_file = {}
if 'multifile' in kwargs:
kwargs_file['multifile'] = kwargs['multifile']
del kwargs['multifile']
else:
kwargs_file['multifile'] = True
kwargs_seq = {}
if 'pattern' in kwargs:
kwargs_seq['pattern'] = kwargs['pattern']
del kwargs['pattern']
if isinstance(files, basestring) and any(i in files for i in '?*'):
files = glob.glob(files)
if not files:
raise ValueError('no files found')
if len(files) == 1:
files = files[0]
if isinstance(files, basestring):
with TiffFile(files, **kwargs_file) as tif:
return tif.asarray(*args, **kwargs)
else:
with TiffSequence(files, **kwargs_seq) as imseq:
return imseq.asarray(*args, **kwargs)
class lazyattr(object):
"""Lazy object attribute whose value is computed on first access."""
__slots__ = ('func', )
def __init__(self, func):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self
value = self.func(instance)
if value is NotImplemented:
return getattr(super(owner, instance), self.func.__name__)
setattr(instance, self.func.__name__, value)
return value
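# A minimal usage sketch of lazyattr (hypothetical class, not part of this
# module):
#
#     class Example(object):
#         @lazyattr
#         def expensive(self):
#             return compute_once()  # body runs on first access only
#
# Because __get__ replaces the descriptor with the computed value via
# setattr, subsequent lookups are plain instance attribute reads.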
class TiffFile(object):
"""Read image and meta-data from TIFF, STK, LSM, and FluoView files.
TiffFile instances must be closed using the close method, which is
automatically called when using the 'with' statement.
Attributes
----------
pages : list
All TIFF pages in file.
series : list of Records(shape, dtype, axes, TiffPages)
TIFF pages with compatible shapes and types.
micromanager_metadata: dict
        Extra MicroManager non-TIFF metadata in the file, if it exists.
All attributes are read-only.
Examples
--------
>>> tif = TiffFile('test.tif')
    >>> try:
... images = tif.asarray()
... except Exception as e:
... print(e)
... finally:
... tif.close()
"""
def __init__(self, arg, name=None, multifile=False):
"""Initialize instance from file.
Parameters
----------
arg : str or open file
Name of file or open file object.
The file objects are closed in TiffFile.close().
name : str
Human readable label of open file.
multifile : bool
If True, series may include pages from multiple files.
"""
if isinstance(arg, basestring):
filename = os.path.abspath(arg)
self._fh = open(filename, 'rb')
else:
filename = str(name)
self._fh = arg
self._fh.seek(0, 2)
self._fsize = self._fh.tell()
self._fh.seek(0)
self.fname = os.path.basename(filename)
self.fpath = os.path.dirname(filename)
self._tiffs = {self.fname: self} # cache of TiffFiles
self.offset_size = None
self.pages = []
self._multifile = bool(multifile)
try:
self._fromfile()
except Exception:
self._fh.close()
raise
def close(self):
"""Close open file handle(s)."""
for tif in self._tiffs.values():
if tif._fh:
tif._fh.close()
tif._fh = None
self._tiffs = {}
def _fromfile(self):
"""Read TIFF header and all page records from file."""
self._fh.seek(0)
try:
self.byteorder = {b'II': '<', b'MM': '>'}[self._fh.read(2)]
except KeyError:
raise ValueError("not a valid TIFF file")
version = struct.unpack(self.byteorder+'H', self._fh.read(2))[0]
if version == 43: # BigTiff
self.offset_size, zero = struct.unpack(self.byteorder+'HH',
self._fh.read(4))
if zero or self.offset_size != 8:
raise ValueError("not a valid BigTIFF file")
elif version == 42:
self.offset_size = 4
else:
raise ValueError("not a TIFF file")
self.pages = []
while True:
try:
page = TiffPage(self)
self.pages.append(page)
except StopIteration:
break
if not self.pages:
raise ValueError("empty TIFF file")
if self.is_micromanager:
# MicroManager files contain metadata not stored in TIFF tags.
self.micromanager_metadata = read_micromanager_metadata(self._fh)
@lazyattr
def series(self):
"""Return series of TiffPage with compatible shape and properties."""
series = []
if self.is_ome:
series = self._omeseries()
elif self.is_fluoview:
dims = {b'X': 'X', b'Y': 'Y', b'Z': 'Z', b'T': 'T',
b'WAVELENGTH': 'C', b'TIME': 'T', b'XY': 'R',
b'EVENT': 'V', b'EXPOSURE': 'L'}
mmhd = list(reversed(self.pages[0].mm_header.dimensions))
series = [Record(
axes=''.join(dims.get(i[0].strip().upper(), 'Q')
for i in mmhd if i[1] > 1),
shape=tuple(int(i[1]) for i in mmhd if i[1] > 1),
pages=self.pages, dtype=numpy.dtype(self.pages[0].dtype))]
elif self.is_lsm:
lsmi = self.pages[0].cz_lsm_info
axes = CZ_SCAN_TYPES[lsmi.scan_type]
if self.pages[0].is_rgb:
axes = axes.replace('C', '').replace('XY', 'XYC')
axes = axes[::-1]
shape = [getattr(lsmi, CZ_DIMENSIONS[i]) for i in axes]
pages = [p for p in self.pages if not p.is_reduced]
series = [Record(axes=axes, shape=shape, pages=pages,
                             dtype=numpy.dtype(pages[0].dtype))]
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''Tests for task decoding'''
import numpy as np
import pytest
import jams
import pumpp
# Sampling rate and hop are simple here to keep things
# divisible for inverse checks
@pytest.fixture()
def sr():
return 10
@pytest.fixture()
def hop_length():
return 1
@pytest.fixture()
def ann_tag():
ann = jams.Annotation(namespace='tag_gtzan', duration=10)
ann.append(time=0, duration=5, value='blues')
ann.append(time=1.5, duration=1.5, value='reggae')
return ann
@pytest.fixture()
def ann_vector():
ann = jams.Annotation(namespace='vector', duration=1)
ann.append(time=0, duration=0, value=np.arange(32))
return ann
@pytest.fixture()
def ann_beat():
ann = jams.Annotation(namespace='beat', duration=10)
# for n, i in enumerate(np.arange(0, 10, 0.5)):
# ann.append(time=i, duration=0, value=1 + (n % 4))
# Make up two measures of 4/4, plus two pickup beats
for t, v in [(0, -2), (0.5, -1),
(1, 1), (1.5, 2), (2, 3), (3, 4),
(3.5, 1), (4, 2), (4.5, 3), (5, 4),
(5.5, 1), (6, 2), (6.5, 3), (7, 4)]:
ann.append(time=t, duration=0, value=v)
return ann
@pytest.fixture()
def ann_chord():
ann = jams.Annotation(namespace='chord', duration=5)
for t, c in [(0, 'C'),
(1, 'C:maj'),
(2, 'D:min/3'),
(3, 'F#:7(*5)'),
(4, 'G:sus2')]:
ann.append(time=t, duration=1, value=c)
return ann
@pytest.fixture(params=[None, 0.5])
def p_self_chord(request):
return request.param
@pytest.fixture(params=[False, True])
def p_init_chord(request):
if request.param:
return np.ones(170) / 170
else:
return None
@pytest.fixture(params=[False, True])
def p_state_chord(request):
if request.param:
return np.ones(170) / 170
else:
return None
@pytest.fixture(params=[None, False, True])
def p_self_tags(request):
if request.param is None:
return None
if request.param:
return 0.5 * np.ones(10) # 10 tags in GTZAN
else:
return 0.5
@pytest.fixture(params=[False, True])
def p_init_tags(request):
if request.param:
return 0.5 * np.ones(10)
else:
return None
@pytest.fixture(params=[False, True])
def p_state_tags(request):
if request.param:
return 0.5 * np.ones(10)
else:
return None
@pytest.fixture(params=[None, False, True])
def p_self_beat(request):
if request.param is None:
return None
elif request.param:
return np.asarray([0.5, 0.0])
else:
return 0.5
@pytest.fixture(params=[None, False, True])
def p_self_down(request):
if request.param is None:
return None
elif request.param:
return np.asarray([0.5, 0.0])
else:
return 0.5
@pytest.fixture(params=[None, 0.5])
def p_init_beat(request):
return request.param
@pytest.fixture(params=[None, 0.5])
def p_init_down(request):
return request.param
@pytest.fixture(params=[None, 0.5])
def p_state_beat(request):
return request.param
@pytest.fixture(params=[None, 0.5])
def p_state_down(request):
return request.param
@pytest.fixture()
def ann_segment():
ann = jams.Annotation(namespace='segment_open', duration=5)
for t, c in [(0, 'A'),
(1, 'B'),
(2, 'A'),
(3, 'B'),
(4, 'C')]:
ann.append(time=t, duration=1, value=c)
return ann
@pytest.fixture()
def ann_key():
ann = jams.Annotation(namespace='key_mode', duration=5)
for t, c in [(0, 'A:major'),
(1, 'Bb:lydian'),
(2, 'A:minor'),
(3, 'B:major'),
(4, 'C:dorian')]:
ann.append(time=t, duration=1, value=c)
return ann
@pytest.fixture(params=[None, 0.5])
def p_self_key(request):
return request.param
@pytest.fixture(params=[False, True])
def p_init_key(request):
if request.param:
return np.ones(109) / 109
else:
return None
@pytest.fixture(params=[False, True])
def p_state_key(request):
if request.param:
return np.ones(109) / 109
else:
return None
def test_decode_tags_dynamic_hard(sr, hop_length, ann_tag, p_self_tags, p_init_tags, p_state_tags):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tc = pumpp.task.DynamicLabelTransformer('genre', 'tag_gtzan',
hop_length=hop_length,
sr=sr,
p_self=p_self_tags,
p_init=p_init_tags,
p_state=p_state_tags)
data = tc.transform_annotation(ann_tag, ann_tag.duration)
inverse = tc.inverse(data['tags'], duration=ann_tag.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_tag.duration)
assert np.allclose(data['tags'], data2['tags'])
def test_decode_tags_dynamic_soft(sr, hop_length, ann_tag, p_self_tags, p_init_tags, p_state_tags):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tc = pumpp.task.DynamicLabelTransformer('genre', 'tag_gtzan',
hop_length=hop_length,
sr=sr,
p_self=p_self_tags,
p_init=p_init_tags,
p_state=p_state_tags)
data = tc.transform_annotation(ann_tag, ann_tag.duration)
# Soften the data, but preserve the decisions
tags_predict = 0.9 * data['tags'] + 0.1 * np.ones_like(data['tags']) / data['tags'].shape[1]
inverse = tc.inverse(tags_predict, duration=ann_tag.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_tag.duration)
assert np.allclose(data['tags'], data2['tags'])
def test_decode_tags_static_hard(ann_tag):
tc = pumpp.task.StaticLabelTransformer('genre', 'tag_gtzan')
data = tc.transform_annotation(ann_tag, ann_tag.duration)
inverse = tc.inverse(data['tags'], ann_tag.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_tag.duration)
assert np.allclose(data['tags'], data2['tags'])
def test_decode_tags_static_soft(ann_tag):
tc = pumpp.task.StaticLabelTransformer('genre', 'tag_gtzan')
data = tc.transform_annotation(ann_tag, ann_tag.duration)
tags_predict = data['tags'] * 0.51 + 0.1
inverse = tc.inverse(tags_predict, ann_tag.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_tag.duration)
assert np.allclose(data['tags'], data2['tags'])
def test_decode_beat_hard(sr, hop_length, ann_beat,
p_self_beat, p_init_beat, p_state_beat):
tc = pumpp.task.BeatTransformer('beat', sr=sr,
hop_length=hop_length,
p_self_beat=p_self_beat,
p_init_beat=p_init_beat,
p_state_beat=p_state_beat)
data = tc.transform_annotation(ann_beat, ann_beat.duration)
inverse = tc.inverse(data['beat'], duration=ann_beat.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_beat.duration)
assert np.allclose(data['beat'], data2['beat'])
def test_decode_beat_soft(sr, hop_length, ann_beat,
p_self_beat, p_init_beat, p_state_beat):
tc = pumpp.task.BeatTransformer('beat', sr=sr,
hop_length=hop_length,
p_self_beat=p_self_beat,
p_init_beat=p_init_beat,
p_state_beat=p_state_beat)
data = tc.transform_annotation(ann_beat, ann_beat.duration)
beat_pred = 0.9 * data['beat'] + 0.1 * np.ones_like(data['beat']) / data['beat'].shape[-1]
inverse = tc.inverse(beat_pred, duration=ann_beat.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_beat.duration)
assert np.allclose(data['beat'], data2['beat'])
def test_decode_beat_downbeat_hard(sr, hop_length, ann_beat,
p_self_beat, p_init_beat, p_state_beat,
p_self_down, p_init_down, p_state_down):
tc = pumpp.task.BeatTransformer('beat', sr=sr, hop_length=hop_length,
p_self_beat=p_self_beat,
p_init_beat=p_init_beat,
p_state_beat=p_state_beat,
p_self_down=p_self_down,
p_init_down=p_init_down,
p_state_down=p_state_down)
data = tc.transform_annotation(ann_beat, ann_beat.duration)
inverse = tc.inverse(data['beat'], downbeat=data['downbeat'],
duration=ann_beat.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_beat.duration)
assert np.allclose(data['beat'], data2['beat'])
def test_decode_beat_downbeat_soft(sr, hop_length, ann_beat,
p_self_beat, p_init_beat, p_state_beat,
p_self_down, p_init_down, p_state_down):
tc = pumpp.task.BeatTransformer('beat', sr=sr, hop_length=hop_length,
p_self_beat=p_self_beat,
p_init_beat=p_init_beat,
p_state_beat=p_state_beat,
p_self_down=p_self_down,
p_init_down=p_init_down,
p_state_down=p_state_down)
data = tc.transform_annotation(ann_beat, ann_beat.duration)
beat_pred = 0.9 * data['beat'] + 0.1 * np.ones_like(data['beat']) / data['beat'].shape[-1]
dbeat_pred = 0.9 * data['downbeat'] + 0.1 * np.ones_like(data['downbeat']) / data['downbeat'].shape[-1]
inverse = tc.inverse(beat_pred, downbeat=dbeat_pred,
duration=ann_beat.duration)
for obs in inverse:
assert 0. <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_beat.duration)
assert np.allclose(data['beat'], data2['beat'])
def test_decode_vector(ann_vector):
tc = pumpp.task.VectorTransformer('cf', 'vector', 32)
data = tc.transform_annotation(ann_vector, ann_vector.duration)
inverse = tc.inverse(data['vector'], duration=ann_vector.duration)
data2 = tc.transform_annotation(inverse, ann_vector.duration)
assert np.allclose(data['vector'], data2['vector'])
@pytest.mark.xfail(raises=NotImplementedError)
def test_decode_chord(sr, hop_length, ann_chord):
tc = pumpp.task.ChordTransformer('chord', sr=sr, hop_length=hop_length)
data = tc.transform_annotation(ann_chord, ann_chord.duration)
inverse = tc.inverse(data['pitch'], data['root'], data['bass'],
duration=ann_chord.duration)
data2 = tc.transform_annotation(inverse, ann_chord.duration)
assert np.allclose(data['pitch'], data2['pitch'])
assert np.allclose(data['root'], data2['root'])
assert np.allclose(data['bass'], data2['bass'])
@pytest.mark.xfail(raises=NotImplementedError)
def test_decode_simplechord(sr, hop_length, ann_chord):
tc = pumpp.task.SimpleChordTransformer('chord', sr=sr,
hop_length=hop_length)
data = tc.transform_annotation(ann_chord, ann_chord.duration)
inverse = tc.inverse(data['pitch'], duration=ann_chord.duration)
data2 = tc.transform_annotation(inverse, ann_chord.duration)
assert np.allclose(data['pitch'], data2['pitch'])
def test_decode_chordtag_hard_dense(sr, hop_length, ann_chord):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tc = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
hop_length=hop_length,
sr=sr, sparse=False)
data = tc.transform_annotation(ann_chord, ann_chord.duration)
inverse = tc.inverse(data['chord'], duration=ann_chord.duration)
for obs in inverse:
assert 0 <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_chord.duration)
assert np.allclose(data['chord'], data2['chord'])
def test_decode_chordtag_soft_dense(sr, hop_length, ann_chord, p_self_chord, p_init_chord, p_state_chord):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tc = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
hop_length=hop_length,
sr=sr, sparse=False,
p_self=p_self_chord,
p_init=p_init_chord,
p_state=p_state_chord)
data = tc.transform_annotation(ann_chord, ann_chord.duration)
chord_predict = 0.9 * data['chord'] + 0.1 * np.ones_like(data['chord']) / data['chord'].shape[1]
inverse = tc.inverse(chord_predict, duration=ann_chord.duration)
for obs in inverse:
assert 0 <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_chord.duration)
assert np.allclose(data['chord'], data2['chord'])
def test_decode_chordtag_hard_sparse_sparse(sr, hop_length, ann_chord):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tc = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
hop_length=hop_length,
sr=sr, sparse=True)
data = tc.transform_annotation(ann_chord, ann_chord.duration)
inverse = tc.inverse(data['chord'], duration=ann_chord.duration)
for obs in inverse:
assert 0 <= obs.confidence <= 1.
data2 = tc.transform_annotation(inverse, ann_chord.duration)
assert np.allclose(data['chord'], data2['chord'])
def test_decode_chordtag_hard_dense_sparse(sr, hop_length, ann_chord):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tcd = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
hop_length=hop_length,
sr=sr, sparse=False)
tcs = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
hop_length=hop_length,
sr=sr, sparse=True)
# Make a hard, dense encoding of the data
data = tcd.transform_annotation(ann_chord, ann_chord.duration)
# Invert using the sparse encoder
inverse = tcs.inverse(data['chord'], duration=ann_chord.duration)
for obs in inverse:
assert 0 <= obs.confidence <= 1.
data2 = tcs.transform_annotation(inverse, ann_chord.duration)
dense_positions = np.where(data['chord'])[1]
sparse_positions = data2['chord'][:, 0]
assert np.allclose(dense_positions, sparse_positions)
def test_decode_chordtag_soft_dense_sparse(sr, hop_length, ann_chord, p_self_chord, p_init_chord, p_state_chord):
# This test encodes an annotation, decodes it, and then re-encodes it
# It passes if the re-encoded version matches the initial encoding
tcd = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
hop_length=hop_length,
sr=sr, sparse=False,
p_self=p_self_chord,
p_init=p_init_chord,
p_state=p_state_chord)
tcs = pumpp.task.ChordTagTransformer('chord', vocab='3567s',
hop_length=hop_length,
sr=sr, sparse=True,
p_self=p_self_chord,
p_init=p_init_chord,
p_state=p_state_chord)
# Make a soft, dense encoding of the data
data = tcd.transform_annotation(ann_chord, ann_chord.duration)
chord_predict = 0.9 * data['chord'] + 0.1 * np.ones_like(data['chord']) / data['chord'].shape[1]
# Invert using the sparse encoder
inverse = tcs.inverse(chord_predict, duration=ann_chord.duration)
for obs in inverse:
assert 0 <= obs.confidence <= 1.
data2 = tcs.transform_annotation(inverse, ann_chord.duration)
dense_positions = np.where(data['chord'])[1]
sparse_positions = data2['chord'][:, 0]
assert np.allclose(dense_positions, sparse_positions)
@pytest.mark.xfail(raises=NotImplementedError)
def test_decode_structure(sr, hop_length, ann_segment):
tc = pumpp.task.StructureTransformer('struct', sr=sr,
hop_length=hop_length)
data = tc.transform_annotation(ann_segment, ann_segment.duration)
inverse = tc.inverse(data['agree'], duration=ann_segment.duration)
data2 = tc.transform_annotation(inverse, ann_segment.duration)
assert np.allclose(data['agree'], data2['agree'])
@pytest.mark.xfail(raises=NotImplementedError)
def test_decode_beatpos(sr, hop_length, ann_beat):
tc = pumpp.task.BeatPositionTransformer('beat', sr=sr,
max_divisions=12,
hop_length=hop_length)
data = tc.transform_annotation(ann_beat, ann_beat.duration)
inverse = tc.inverse(data['position'], duration=ann_beat.duration)
data2 = tc.transform_annotation(inverse, ann_beat.duration)
    assert np.allclose(data['position'], data2['position'])
# LSTM-Autoencoder based Anomaly Detection (LAAD)
# detects abnormal RHR; uses all training data; augments the training data to 8 times its original size.
######################################################
# Author: <NAME> #
# Email: <EMAIL> #
# Location: Dept.of Genetics, Stanford University #
# Date: Nov 26 2020 #
######################################################
#python laad_RHR_keras_v4.py --heart_rate COVID-19-Wearables/ASFODQR_hr.csv --steps COVID-19-Wearables/ASFODQR_steps.csv --myphd_id ASFODQR --symptom_date 2024-08-14
import warnings
warnings.filterwarnings('ignore')
import sys
import argparse
import copy
import numpy as np
import pandas as pd
import seaborn as sns
import itertools
from itertools import cycle
from tqdm import tqdm
import random
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from scipy import interp
from arff2pandas import a2p
from datetime import date, datetime, timedelta
from statsmodels.tsa.seasonal import seasonal_decompose
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential, load_model, save_model
from pylab import rcParams
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.ticker import FormatStrFormatter
from matplotlib import rc
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
palette = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]
sns.set_palette(sns.color_palette(palette))
rcParams['figure.figsize'] = 12, 8
# as command prompts -----------------------
parser = argparse.ArgumentParser(description='Find anomalies in wearables time-series data')
parser.add_argument('--heart_rate', metavar='', help ='raw heart rate count with a header = heartrate')
parser.add_argument('--steps',metavar='', help ='raw steps count with a header = steps')
parser.add_argument('--myphd_id',metavar='', default = 'myphd_id', help ='user myphd_id')
parser.add_argument('--symptom_date', metavar='', default = 'NaN', help = 'symptom date with y-m-d format')
parser.add_argument('--random_seed', metavar='', type=int, default=42, help='random seed')
args = parser.parse_args()
# as arguments -----------------------
fitbit_oldProtocol_hr = args.heart_rate
fitbit_oldProtocol_steps = args.steps
myphd_id = args.myphd_id
symptom_date = args.symptom_date
RANDOM_SEED = args.random_seed
np.random.seed(RANDOM_SEED)
tf.random.set_seed(RANDOM_SEED)
random.seed(RANDOM_SEED)
# Hyper-parameters --------------------
TIME_STEPS = 8
EPOCHS = 1200
BATCH_SIZE = 64
VALIDATION_SPLIT = 0.05
LEARNING_RATE = 0.0001
#BASE_LINE_DAYS = 10
class LAAD:
# infer resting heart rate ------------------------------------------------------
def resting_heart_rate(self, heartrate, steps):
"""
This function uses heart rate and steps data to infer resting heart rate.
        It keeps heart rate values recorded when the step count was zero during the preceding 12 minutes (including the current minute).
"""
# heart rate data
df_hr = pd.read_csv(fitbit_oldProtocol_hr)
df_hr = df_hr.set_index('datetime')
df_hr.index.name = None
df_hr.index = pd.to_datetime(df_hr.index)
# steps data
df_steps = pd.read_csv(fitbit_oldProtocol_steps)
df_steps = df_steps.set_index('datetime')
df_steps.index.name = None
df_steps.index = pd.to_datetime(df_steps.index)
# merge dataframes
#df_hr = df_hr.resample('1min').mean()
#df_steps = df_steps.resample('1min').mean()
df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True)
df1 = df1.resample('1min').mean()
df1 = df1.dropna()
# define RHR as the HR measurements recorded when there were zero steps taken during a rolling time window of the preceding 12 minutes (including the current minute).
df1['steps_window_12'] = df1['steps'].rolling(12).sum()
df1 = df1.loc[(df1['steps_window_12'] == 0)]
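        # Worked example of the filter above: with one row per minute,
        # df1['steps'].rolling(12).sum() at 10:00 sums the steps of
        # 09:49-10:00 (12 rows including the current minute), so keeping only
        # rows where that sum is 0 retains heart rates preceded by 12
        # step-free minutes.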
return df1
# pre-processing ------------------------------------------------------
def pre_processing(self, resting_heart_rate):
"""
        It takes resting heart rate data, smooths it with a moving average, and
        aggregates it to hourly resolution by taking the average values.
"""
# smooth data
        df_nonas = resting_heart_rate.dropna()
df1_rom = df_nonas.rolling(400).mean()
# resample
df1_resmp = df1_rom.resample('1H').mean()
df2 = df1_resmp.drop(['steps'], axis=1)
df2 = df2.drop(['steps_window_12'], axis=1)
#df2 = df2.resample('24H').mean()
df2 = df2.dropna()
df2 = df2.rename(columns={"heartrate": "RHR"})
return df2
# data splitting ------------------------------------------------------
def data_splitting(self, processed_data, symptom_date):
"""
        It splits the data into training data (all days up to 20 days before the symptom date) and testing data (the rest).
        It also creates windows covering the pre- and post-symptomatic COVID periods.
"""
symptom_date1 = pd.to_datetime(symptom_date)
symptom_date_before_7 = pd.to_datetime(symptom_date1) + timedelta(days=-7)
symptom_date_after_21 = pd.to_datetime(symptom_date1) + timedelta(days=21)
symptom_date_before_20 = pd.to_datetime(symptom_date1) + timedelta(days=-20)
symptom_date_before_10 = pd.to_datetime(symptom_date1) + timedelta(days=-10)
# train data
#train = processed_data[:BASE_LINE_DAYS]
processed_data = processed_data.reset_index()
processed_data['date'] = [d.date() for d in processed_data['index']]
processed_data['time'] = [d.time() for d in processed_data['index']]
processed_data = processed_data.set_index('date')
processed_data.index.name = None
processed_data.index = pd.to_datetime(processed_data.index)
# split data into train
start = processed_data.index[0]
train = processed_data[(processed_data.index.get_level_values(0) < symptom_date_before_20)]
train = train.set_index('index')
train = train.drop(['time'], axis=1)
# test data
processed_data = processed_data.reset_index()
processed_data['date'] = [d.date() for d in processed_data['index']]
processed_data['time'] = [d.time() for d in processed_data['index']]
processed_data = processed_data.set_index('date')
processed_data.index.name = None
processed_data.index = pd.to_datetime(processed_data.index)
start = processed_data.index[0]
test = processed_data[(processed_data.index.get_level_values(0) >= symptom_date_before_20)]
test = test.set_index('index')
test = test.drop(['time'], axis=1)
end = processed_data.index[-1]
# calculate delta RHR
train_reset = train.reset_index()
train_baseline_RHR = train_reset['RHR'].mean()
test_anomaly_RHR = test[symptom_date_before_7:symptom_date_after_21]
test_anomaly_delta_RHR = test_anomaly_RHR['RHR'] - train_baseline_RHR
with open(myphd_id+'_data_split_dates.csv', 'w') as f:
print("id","start_date ","symptom_date1", "symptom_date_before_20 ","symptom_date_before_7 ", "symptom_date_before_10 ", "symptom_date_after_21 ","end_date ","\n",
myphd_id, start, symptom_date, symptom_date_before_20, symptom_date_before_7, symptom_date_before_10, symptom_date_after_21, end, file=f)
return symptom_date1, symptom_date_before_20, symptom_date_before_7, symptom_date_before_10, symptom_date_after_21, train, test, test_anomaly_delta_RHR
# standardization ------------------------------------------------------
def standardization(self, train_data, test_data, symptom_date_before_20, symptom_date_before_7, symptom_date_before_10, symptom_date_after_21):
"""
        It standardizes the data to zero mean and unit variance (Z-score).
        The test data are normalized using the feature summary statistics computed from the training data.
        It also splits the test data into test-normal and test-anomaly sets for the metrics calculations later.
        (Delta RHR for the test-anomaly window is computed in data_splitting against the training baseline.)
"""
# standardize train data
scaler = StandardScaler()
train_data[['RHR']] = scaler.fit_transform(train_data[['RHR']])
# standardize test data
test_data = test_data.drop(['level_0'], axis=1)
test_data[['RHR']] = scaler.transform(test_data[['RHR']])
# split data for test_normal and test_anomaly
test_anomaly = test_data[symptom_date_before_7:symptom_date_after_21]
test_normal = test_data[symptom_date_before_20:symptom_date_before_10]
all_merged = pd.concat([train_data, test_data])
#print(all_merged)
with open(myphd_id+'_data_size.csv', 'w') as f:
print("id","train ","test ", "test_normal ", "test_anomaly ","\n",
myphd_id, train_data.shape, test_data.shape, test_normal.shape, test_anomaly.shape, file=f)
return train_data, test_data, test_normal, test_anomaly, all_merged
# creating LSTM input ------------------------------------------------------
"""
    Applies the lag method to create overlapping subsequences while keeping the temporal order of the data intact.
"""
def create_dataset(self, dataset, time_steps=1):
Xs = []
for i in range(len(dataset) - time_steps):
v = dataset.iloc[i:(i + time_steps)].values
Xs.append(v)
return np.array(Xs)
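    # Shape sketch: a single-column DataFrame with 100 hourly rows and
    # time_steps=8 yields an array of shape (92, 8, 1) -- 92 overlapping
    # windows of 8 consecutive RHR values.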
# Data Augmentation ------------------------------------------------------
"""
Applies a combination of different distortions to the data including
    scaling, rotation, permutation, magnitude warping, time warping, window slicing, and window warping.
"""
def augmentation(self, dataset):
def scaling(dataset, sigma=0.1):
factor = np.random.normal(loc=1., scale=sigma, size=(dataset.shape[0],dataset.shape[2]))
data_scaled = np.multiply(dataset, factor[:,np.newaxis,:])
return data_scaled
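        # E.g. with sigma=0.1, scaling() multiplies every window by its own
        # per-channel factor drawn from N(1, 0.1**2), broadcast over the time
        # axis -- a simple amplitude jitter.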
def rotation(dataset):
flip = np.random.choice([-1, 1], size=(dataset.shape[0],dataset.shape[2]))
rotate_axis = np.arange(dataset.shape[2])
np.random.shuffle(rotate_axis)
data_rotation = flip[:,np.newaxis,:] * dataset[:,:,rotate_axis]
return data_rotation
def permutation(dataset, max_segments=5, seg_mode="equal"):
orig_steps = np.arange(dataset.shape[1])
num_segs = np.random.randint(1, max_segments, size=(dataset.shape[0]))
data_permute = np.zeros_like(dataset)
for i, pat in enumerate(dataset):
if num_segs[i] > 1:
if seg_mode == "random":
split_points = np.random.choice(dataset.shape[1]-2, num_segs[i]-1, replace=False)
split_points.sort()
splits = np.split(orig_steps, split_points)
else:
splits = np.array_split(orig_steps, num_segs[i])
warp = np.concatenate(np.random.permutation(splits)).ravel()
data_permute[i] = pat[warp]
else:
data_permute[i] = pat
return data_permute
def magnitude_warp(dataset, sigma=0.2, knot=4):
from scipy.interpolate import CubicSpline
orig_steps = np.arange(dataset.shape[1])
random_warps = np.random.normal(loc=1.0, scale=sigma, size=(dataset.shape[0], knot+2, dataset.shape[2]))
            warp_steps = (np.ones((dataset.shape[2], 1)) * (np.linspace(0, dataset.shape[1] - 1., num=knot + 2))).T
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 29 15:35:08 2018
@author: prmiles
"""
import numpy as np
from pymcmcstat.MCMC import MCMC
from pymcmcstat.settings.DataStructure import DataStructure
from pymcmcstat.structures.ParameterSet import ParameterSet
import os
def removekey(d, key):
r = dict(d)
del r[key]
return r
# define test model function
def modelfun(xdata, theta):
m = theta[0]
b = theta[1]
nrow = xdata.shape[0]
y = np.zeros([nrow,1])
y[:,0] = m*xdata.reshape(nrow,) + b
return y
def ssfun(theta, data, local=None):
xdata = data.xdata[0]
ydata = data.ydata[0]
# eval model
ymodel = modelfun(xdata, theta)
# calc sos
ss = sum((ymodel[:,0] - ydata[:,0])**2)
return ss
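# In symbols, ssfun evaluates ss(theta) = sum_i (m*x_i + b - y_i)**2 for
# theta = (m, b); this sum of squares enters the Gaussian likelihood
# exp(-ss / (2*sigma**2)) used by the sampler.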
def custom_ssfun(theta, data, custom=None):
return custom
def basic_mcmc():
# Initialize MCMC object
mcstat = MCMC()
# Add data
nds = 100
x = np.linspace(2, 3, num=nds)
y = 2.*x + 3. + 0.1*np.random.standard_normal(x.shape)
mcstat.data.add_data_set(x, y)
mcstat.simulation_options.define_simulation_options(nsimu = int(5.0e3), updatesigma = 1, method = 'dram', verbosity = 0)
# update model settings
mcstat.model_settings.define_model_settings(sos_function = ssfun)
mcstat.parameters.add_model_parameter(name = 'm', theta0 = 2., minimum = -10, maximum = np.inf, sample = 1)
mcstat.parameters.add_model_parameter(name = 'b', theta0 = -5., minimum = -10, maximum = 100, sample = 0)
mcstat.parameters.add_model_parameter(name = 'b2', theta0 = -5., minimum = -10, maximum = 100, sample = 1)
return mcstat
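# A minimal usage sketch (assuming pymcmcstat's standard driver API):
#
# mcstat = basic_mcmc()
# mcstat.run_simulation()
# results = mcstat.simulation_results.results  # chain, s2chain, sschain, ...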
def setup_initialize_chains(CL, updatesigma = True, nsos = 1):
mcstat = setup_mcmc_case_cp()
mcstat.simulation_options.updatesigma = updatesigma
mcstat.model_settings.nsos = nsos
mcstat._MCMC__old_set = ParameterSet(theta = CL['theta'], ss = CL['ss'], prior = CL['prior'], sigma2 = CL['sigma2'])
mcstat._MCMC__chain_index = mcstat.simulation_options.nsimu - 1
mcstat._MCMC__initialize_chains(chainind = 0, nsimu = mcstat.simulation_options.nsimu, npar = mcstat.parameters.npar, nsos = mcstat.model_settings.nsos, updatesigma = mcstat.simulation_options.updatesigma, sigma2 = mcstat.model_settings.sigma2)
return mcstat
def setup_case():
mcstat = basic_mcmc()
mcstat._MCMC__chain = np.random.random_sample(size = (100,2))
mcstat._MCMC__sschain = np.random.random_sample(size = (100,2))
mcstat._MCMC__s2chain = np.random.random_sample(size = (100,2))
mcstat._covariance._R = np.array([[0.5, 0.2],[0., 0.3]])
mcstat._MCMC__chains = []
mcstat._MCMC__chains.append(dict(file = mcstat.simulation_options.chainfile, mtx = mcstat._MCMC__chain))
mcstat._MCMC__chains.append(dict(file = mcstat.simulation_options.sschainfile, mtx = mcstat._MCMC__sschain))
mcstat._MCMC__chains.append(dict(file = mcstat.simulation_options.s2chainfile, mtx = mcstat._MCMC__s2chain))
return mcstat
def setup_mcmc():
mcstat = basic_mcmc()
mcstat._initialize_simulation()
# extract components
model = mcstat.model_settings
options = mcstat.simulation_options
parameters = mcstat.parameters
data = mcstat.data
return model, options, parameters, data
def setup_mcmc_case_mh():
mcstat = basic_mcmc()
mcstat._initialize_simulation()
# extract components
sos_object = mcstat._MCMC__sos_object
prior_object = mcstat._MCMC__prior_object
parameters = mcstat.parameters
return sos_object, prior_object, parameters
def setup_mcmc_case_dr():
mcstat = basic_mcmc()
mcstat._initialize_simulation()
# extract components
model = mcstat.model_settings
options = mcstat.simulation_options
parameters = mcstat.parameters
data = mcstat.data
covariance = mcstat._covariance
rejected = {'total': 10, 'outside_bounds': 2}
chain = np.zeros([options.nsimu, 2])
    s2chain = np.zeros([options.nsimu, 1])
""" Sensor Identification Module
This module contains a class for choosing which irradiance sensor best
describes a PV power or current data set. We assume a linear model between
irradiance and power/current, and we use k-fold cross validation to assess
which irradiance sensor provides the best predictive power.
Generally speaking, we can try to assess a sensor's distance from an array and
its plane-of-array mismatch. Hopefully, there exists a sensor that is both
close by the array and well aligned; however, this is not always the case.
We use clear sky data to assess POA mismatch and cloudy sky data to assess
distance from array. If there is a discrepancy in which sensor is "best" under
these two data filtering schemes, the algorithm alerts the user.
"""
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression, HuberRegressor
from sklearn.model_selection import KFold, TimeSeriesSplit
def rmse(residuals):
return np.sqrt(np.average(np.power(residuals, 2)))
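# Worked example: rmse(np.array([3., -4.])) = sqrt((9 + 16) / 2) ~= 3.54.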
class SensorIdentification:
def __init__(self, data_handler_obj):
"""
This class is always instantiated with a DataHandler class instance.
This instance should have the pipeline run, with extra columns included
that identify the candidate sensors.
:param data_handler_obj: (required) a DataHandler class instance
"""
self.data_handler = data_handler_obj
self.sensor_keys = np.array(list(self.data_handler.extra_matrices.keys()))
if len(self.sensor_keys) == 0:
print("Please add sensor columns to DataHandler pipeline.")
return
self.coverage_scores = self.data_handler.extra_quality_scores
nan_masks = [~np.isnan(m[1]) for m in self.data_handler.extra_matrices.items()]
        self.compare_mask = np.all(np.array(nan_masks), axis=0)
# These attributes are set when running the identify method
self.results_table = None
self.chosen_sensor = None
self.consistent_answer = None
def identify(
self,
n_splits=20,
model="least-squares",
epsilon=1.35,
max_iter=100,
alpha=0.0001,
):
# least squares is about 10x faster than huber, but less robust
if model == "least-squares":
lr = LinearRegression()
elif model == "huber":
lr = HuberRegressor(epsilon=epsilon, max_iter=max_iter, alpha=alpha)
self.results_table = None
self.consistent_answer = None
self.chosen_sensor = None
filters = {
"no_errors": self.data_handler.daily_flags.no_errors,
"clear": self.data_handler.daily_flags.clear,
"cloudy": self.data_handler.daily_flags.cloudy,
}
results = pd.DataFrame(
columns=["sensor", "filter", "corr", "cv-rmse", "cv-mbe"]
)
counter = 0
for filter_key, filter in filters.items():
            mask = np.zeros_like(self.compare_mask, dtype=bool)
from collections import OrderedDict
from glob import glob
import numpy as np
import os
import tqdm
import mne
from mne.io.eeglab.eeglab import _check_load_mat, _get_info
from mne.preprocessing import read_ica_eeglab
from mne.io import read_raw_eeglab
from .config import eegip_config, load_additionnal_config
from .utils import load_montage
from .path import get_path, parse_pattern
from slurppy import Config
def mark_bad_channels(raw, file_name, mark_to_remove=("manual", "rank")):
raw_eeg = _check_load_mat(file_name, None)
info, _, _ = _get_info(raw_eeg)
chan_info = raw_eeg.marks["chan_info"]
mat_chans = np.array(info["ch_names"])
assert(len(chan_info["flags"][0]) == len(mat_chans))
if len(np.array(chan_info["flags"]).shape) > 1:
ind_chan_to_drop = np.unique(np.concatenate([np.where(flags)[0] for flags, label in zip(chan_info["flags"],
chan_info["label"])
if label in mark_to_remove]))
else:
ind_chan_to_drop = np.where(chan_info["flags"])[0]
bad_chan = [chan for chan in mat_chans[ind_chan_to_drop]]
raw.info['bads'].extend(bad_chan)
def channel_rejection(raw, file_name, mark_to_remove=("manual", "rank")):
mark_bad_channels(raw, file_name, mark_to_remove)
raw.drop_channels(raw.info['bads'])
def add_bad_segment_annot(raw, file_name, mark_to_remove=("manual",)):
raw_eeg = _check_load_mat(file_name, None)
time_info = raw_eeg.marks["time_info"]
    if len(np.array(time_info["flags"]).shape) > 1:
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""Generate plots of single grid point analysis.
Example::
$ python single_loc_plots.py
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import weibull_min
from scipy.optimize import curve_fit
if __name__ == '__main__':
# TODO Added convenience utils here for now, hotfix to be able to run this
from convenience_utils import hour_to_date_str, hour_to_date
from process_data import eval_single_location, heights_of_interest, analyzed_heights, analyzed_heights_ids
else:
from ..utils.convenience_utils import hour_to_date_str, hour_to_date
from .process_data import eval_single_location, heights_of_interest, analyzed_heights, analyzed_heights_ids
# Timestamps for which the wind profiles are evaluated in figure 5.
hours_wind_profile_plots = [1016833, 1016837, 1016841, 1016852, 1016876, 1016894, 1016910, 1016958]
# Starting points used for constructing Weibull fits.
curve_fit_starting_points = {
'100 m fixed': (2.28998636, 0., 9.325903),
'500 m fixed': (1.71507275, 0.62228813, 10.34787431),
'1250 m fixed': (1.82862734, 0.30115809, 10.63203257),
'300 m ceiling': (1.9782503629055668, 0.351604371, 10.447848193717771),
'500 m ceiling': (1.82726087, 0.83650295, 10.39813481),
'1000 m ceiling': (1.83612611, 1.41125279, 10.37226014),
'1250 m ceiling': (1.80324619, 2.10282164, 10.26859976),
}
# Styling settings used for making plots.
date_str_format = "%Y-%m-%d %H:%M"
color_cycle_default = plt.rcParams['axes.prop_cycle'].by_key()['color']
marker_cycle = ('s', 'x', 'o', '+', 'v', '^', '<', '>', 'D')
def plot_timeline(hours, data,
ylabel='Power [W]',
#heights_of_interest, ceiling_id, floor_id,
#data_bounds=[50, 500],
show_n_hours=24*7):
# TODO rename
"""Plot optimal height and wind speed time series for the first week of data.
Args:
hours (list): Hour timestamps.
v_ceiling (list): Optimal wind speed time series resulting from variable-height analysis.
optimal_heights (list): Time series of optimal heights corresponding to `v_ceiling`.
heights_of_interest (list): Heights above the ground at which the wind speeds are evaluated.
ceiling_id (int): Id of the ceiling height in `heights_of_interest`, as used in the variable-height analysis.
floor_id (int): Id of the floor height in `heights_of_interest`, as used in the variable-height analysis.
"""
shift = int(1e4)
# TODO optional time range, not only from beginning
# TODO heights_of_interest use cases fix -> height_bounds
data = data[shift:shift+show_n_hours]
dates = [hour_to_date(h) for h in hours[shift:shift+show_n_hours]]
fig, ax = plt.subplots(1, 1)
plt.subplots_adjust(bottom=.2)
# Plot the height limits.
dates_limits = [dates[0], dates[-1]]
# ceiling_height = height_bounds[1] # heights_of_interest[ceiling_id]
# floor_height = height_bounds[0] # heights_of_interest[floor_id]
# ax[0].plot(dates_limits, [ceiling_height]*2, 'k--', label='height bounds')
# ax[0].plot(dates_limits, [floor_height]*2, 'k--')
# Plot the optimal height time series.
ax.plot(dates, data, color='darkcyan')
# Plot the markers at the points for which the wind profiles are plotted
# in figure 5b.
# TODO make optional
# marker_ids = [list(hours).index(h) for h in hours_wind_profile_plots]
# for i, h_id in enumerate(marker_ids):
# ax[0].plot(dates[h_id], optimal_heights[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
# markeredgewidth=2, markerfacecolor='None')
ax.set_ylabel(ylabel)
ax.set_xlabel('Time')
# ax.set_ylim([0, 800])
ax.grid()
# ax.legend()
ax.set_xlim(dates_limits)
# plt.axes(ax[1])
plt.xticks(rotation=70)
#plt.savefig('/home/s6lathim/physik/AWE/meeting/ireland/power_timeline.pdf')
def plot_figure_5a_new(hours, v_ceiling, optimal_heights,
#heights_of_interest, ceiling_id, floor_id,
height_range=None,
ref_velocity=None,
height_bounds=[200, 500],
v_bounds=[None, None],
show_n_hours=24*7):
    # TODO rename
"""Plot optimal height and wind speed time series for the first week of data.
Args:
hours (list): Hour timestamps.
v_ceiling (list): Optimal wind speed time series resulting from variable-height analysis.
optimal_heights (list): Time series of optimal heights corresponding to `v_ceiling`.
heights_of_interest (list): Heights above the ground at which the wind speeds are evaluated.
ceiling_id (int): Id of the ceiling height in `heights_of_interest`, as used in the variable-height analysis.
floor_id (int): Id of the floor height in `heights_of_interest`, as used in the variable-height analysis.
"""
shift = int(1e4)
# TODO optional time range, not only from beginning
# TODO heights_of_interest use cases fix -> height_bounds
optimal_heights = optimal_heights[shift:shift+show_n_hours]
if not isinstance(hours[0], np.datetime64):
dates = [hour_to_date(h) for h in hours[shift:shift+show_n_hours]]
else:
dates = hours[shift:shift+show_n_hours]
v_ceiling = v_ceiling[shift:shift+show_n_hours]
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(7, 6))
plt.subplots_adjust(bottom=.2)
# Plot the height limits.
dates_limits = [dates[0], dates[-1]]
ceiling_height = height_bounds[1] # heights_of_interest[ceiling_id]
floor_height = height_bounds[0] # heights_of_interest[floor_id]
if ceiling_height > 0:
ax[0].plot(dates_limits,
[ceiling_height]*2, 'k--',
label='height bounds')
if floor_height > 0:
ax[0].plot(dates_limits, [floor_height]*2, 'k--')
# Plot the optimal height time series.
ax[0].plot(dates, optimal_heights, color='darkcyan', label='AWES height')
if height_range is not None:
ax[0].plot(dates, height_range['min'][shift:shift+show_n_hours],
color='darkcyan', alpha=0.25)
ax[0].plot(dates, height_range['max'][shift:shift+show_n_hours],
color='darkcyan', alpha=0.25, label='max/min AWES height')
print('heights plotted...')
# Plot the markers at the points for which the wind profiles are plotted
# in figure 5b.
# TODO make optional
#marker_ids = [list(hours).index(h) for h in hours_wind_profile_plots]
#for i, h_id in enumerate(marker_ids):
# ax[0].plot(dates[h_id], optimal_heights[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
# markeredgewidth=2, markerfacecolor='None')
ax[0].set_ylabel('Height [m]')
# TODO automatize ylim
ax[0].set_ylim([0, 600])
ax[0].grid()
ax[0].legend()
if ref_velocity is not None:
print(ref_velocity.shape)
ref_velocity = ref_velocity[shift:shift+show_n_hours]
ax[1].plot(dates, ref_velocity, alpha=0.5, label='@ ref. height')
print('ref velocity plotted')
if v_bounds[0] is not None:
ax[1].plot(dates_limits,
[v_bounds[0]]*2, 'k--',
label='wind speed bounds')
if v_bounds[1] is not None:
ax[1].plot(dates_limits,
[v_bounds[1]]*2, 'k--')
# Plot the optimal wind speed time series.
ax[1].plot(dates, v_ceiling, label='@ AWES height', color='darkcyan')
ax[1].legend()
#for i, h_id in enumerate(marker_ids):
# ax[1].plot(dates[h_id], v_ceiling[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
# markeredgewidth=2, markerfacecolor='None')
ax[1].set_ylabel('Wind speed [m/s]')
ax[1].grid()
ax[1].set_xlim(dates_limits)
print('wind speeds plotted...')
plt.axes(ax[1])
plt.xticks(rotation=70)
#fig.savefig('/home/s6lathim/physik/AWE/meeting/ireland/harvesting_height_wind_speed_timeline.pdf')
return dates
def plot_figure_5a(hours, v_ceiling, optimal_heights, heights_of_interest, ceiling_id, floor_id):
"""Plot optimal height and wind speed time series for the first week of data.
Args:
hours (list): Hour timestamps.
v_ceiling (list): Optimal wind speed time series resulting from variable-height analysis.
optimal_heights (list): Time series of optimal heights corresponding to `v_ceiling`.
heights_of_interest (list): Heights above the ground at which the wind speeds are evaluated.
ceiling_id (int): Id of the ceiling height in `heights_of_interest`, as used in the variable-height analysis.
floor_id (int): Id of the floor height in `heights_of_interest`, as used in the variable-height analysis.
"""
# Only keep the first week of data from the time series.
show_n_hours = 24*7
optimal_heights = optimal_heights[:show_n_hours]
dates = [hour_to_date(h) for h in hours[:show_n_hours]]
v_ceiling = v_ceiling[:show_n_hours]
fig, ax = plt.subplots(2, 1, sharex=True)
plt.subplots_adjust(bottom=.2)
# Plot the height limits.
dates_limits = [dates[0], dates[-1]]
ceiling_height = heights_of_interest[ceiling_id]
floor_height = heights_of_interest[floor_id]
ax[0].plot(dates_limits, [ceiling_height]*2, 'k--', label='height bounds')
ax[0].plot(dates_limits, [floor_height]*2, 'k--')
# Plot the optimal height time series.
ax[0].plot(dates, optimal_heights, color='darkcyan', label='optimal height')
# Plot the markers at the points for which the wind profiles are plotted in figure 5b.
marker_ids = [list(hours).index(h) for h in hours_wind_profile_plots]
for i, h_id in enumerate(marker_ids):
ax[0].plot(dates[h_id], optimal_heights[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
markeredgewidth=2, markerfacecolor='None')
ax[0].set_ylabel('Height [m]')
ax[0].set_ylim([0, 800])
ax[0].grid()
ax[0].legend()
# Plot the optimal wind speed time series.
ax[1].plot(dates, v_ceiling)
for i, h_id in enumerate(marker_ids):
ax[1].plot(dates[h_id], v_ceiling[h_id], marker_cycle[i], color=color_cycle_default[i], markersize=8,
markeredgewidth=2, markerfacecolor='None')
ax[1].set_ylabel('Wind speed [m/s]')
ax[1].grid()
ax[1].set_xlim(dates_limits)
    plt.sca(ax[1])
plt.xticks(rotation=70)
def plot_figure_5b(hours, v_req_alt, v_ceiling, optimal_heights, heights_of_interest, ceiling_id, floor_id):
"""Plot vertical wind speed profiles for timestamps in `hours_wind_profile_plots`.
Args:
hours (list): Hour timestamps.
v_req_alt (ndarray): Time series of wind speeds at `heights_of_interest`.
v_ceiling (list): Optimal wind speed time series resulting from variable-height analysis.
optimal_heights (list): Time series of optimal heights corresponding to `v_ceiling`.
heights_of_interest (list): Heights above the ground at which the wind speeds are evaluated.
ceiling_id (int): Id of the ceiling height in `heights_of_interest`, as used in the variable-height analysis.
floor_id (int): Id of the floor height in `heights_of_interest`, as used in the variable-height analysis.
"""
fig, ax = plt.subplots()
# Plot the height limits.
wind_speed_limits = [0., 30.]
ceiling_height = heights_of_interest[ceiling_id]
floor_height = heights_of_interest[floor_id]
ax.plot(wind_speed_limits, [ceiling_height]*2, 'k--', label='height bounds')
ax.plot(wind_speed_limits, [floor_height]*2, 'k--')
# Plot the vertical wind profiles.
dates = [hour_to_date_str(h, date_str_format) for h in hours]
marker_ids = [list(hours).index(h) for h in hours_wind_profile_plots]
for i, h_id in enumerate(marker_ids):
ax.plot(v_req_alt[h_id, :], heights_of_interest, color=color_cycle_default[i])
ax.plot(v_ceiling[h_id], optimal_heights[h_id], '-' + marker_cycle[i], label=dates[h_id],
color=color_cycle_default[i], markersize=8, markeredgewidth=2, markerfacecolor='None')
plt.xlim(wind_speed_limits)
plt.ylim([0, 800.])
plt.ylabel('Height [m]')
plt.xlabel('Wind speed [m/s]')
plt.grid()
plt.legend(bbox_to_anchor=(1.05, 1.))
plt.subplots_adjust(right=0.65)
def fit_and_plot_weibull(wind_speeds, x_plot, line_styling, line_label, percentiles):
"""Fit Weibull distribution to histogram data and plot result. The used fitting method yielded better fits than
weibull_min.fit().
Args:
wind_speeds (list): Series of wind speeds.
x_plot (list): Wind speeds for which the Weibull fit is plotted.
format string (str): A format string for setting basic line properties.
line_label (str): Label name of line in legend.
percentiles (list): Heights above the ground at which the wind speeds are evaluated.
"""
# Perform curve fitting.
starting_point = curve_fit_starting_points.get(line_label, None)
# Actual histogram data is used to fit the Weibull distribution to.
    hist, bin_edges = np.histogram(wind_speeds, 100, range=(0., 35.))
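    # --- The remainder of this function is truncated in the source. What follows is a
    # minimal sketch of a plausible continuation, assuming the Weibull pdf is fitted to
    # the normalised histogram with scipy.optimize.curve_fit; the helper name
    # `weibull_pdf` and the fallback starting point are illustrative, not original code.
    from scipy.optimize import curve_fit
    def weibull_pdf(v, shape, scale):
        # Two-parameter Weibull probability density (location fixed at 0).
        return (shape / scale) * (v / scale)**(shape - 1) * np.exp(-(v / scale)**shape)
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
    density = hist / (hist.sum() * np.diff(bin_edges))  # normalise counts to a pdf
    p0 = starting_point if starting_point is not None else (2., 8.)
    popt, _ = curve_fit(weibull_pdf, bin_centers, density, p0=p0)
    x = np.asarray(x_plot)
    plt.plot(x, weibull_pdf(x, *popt), line_styling, label=line_label)
    # (Handling of `percentiles` is omitted in this sketch.)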
#!/usr/bin/env python
'''
mcu: Modeling and Crystallographic Utilities
Copyright (C) 2019 <NAME>. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Email: <NAME> <<EMAIL>>
'''
import numpy as np
from ..utils import plot, str_format
from ..utils.misc import check_exist
from ..vasp import const
from ..cell import utils as cell_utils
from ..cell import cell
from . import qe_io
class main(cell.main, plot.main):
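    '''Main interface to a Quantum Espresso calculation.
    Illustrative usage (an assumption; it requires prefix.scf.out etc. to exist
    in the working directory):
        mymol = main(prefix='MyProject')
        mymol.get_bandgap()
    '''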
def __init__(self, prefix=None):
assert prefix is not None, "Provide a prefix name for your project, for example, prefix.scf.out, prefix.band.out, etc."
self.prefix = prefix
self.get_info()
############ General #################
def get_info(self, filename=None):
        '''Extract basic information from the QE output file (prefix.scf.out)'''
if filename is None:
if check_exist(self.prefix + ".scf.out"):
filename = self.prefix + ".scf.out"
else:
assert 0, "Cannot find any prefix.scf.out file"
data = qe_io.read_pw_output(filename)
self.nelec = data['nelec']
self.nbands = data['nbands']
self.soc = data['soc']
self.kpts = data['kpts'][:,:3]
self.kpts_weight = data['kpts'][:,3]
self.nkpts = self.kpts.shape[0]
self.band = data['eigenvals']
self.ispin = self.band.shape[0]
self.atom = data['atom']
self.natom = len(self.atom)
self.element = [atm[0] for atm in data['species']]
# Make a cell object in the spglib format
alat = data['alat'] * const.AUTOA
lattice = data['crystal axes'] * alat
positions = data['atom_position']
numbers = cell_utils.convert_atomtype(self.atom)
self.cell_init = (lattice, positions, numbers)
self.cell = self.cell_init
############ Plotting #################
def get_efermi(self, data=None):
'''Get Fermi energy'''
if data is None:
if check_exist(self.prefix + ".band.out"):
filename = self.prefix + ".band.out"
elif check_exist(self.prefix + ".scf.out"):
filename = self.prefix + ".scf.out"
data = qe_io.read_pw_output(filename)
if data['hoco'] is not None:
efermi = data['hoco']
elif data['efermi'] is not None:
efermi = data['efermi']
elif check_exist(self.prefix + ".scf.out"):
filename = self.prefix + ".scf.out"
data = qe_io.read_pw_output(filename)
efermi = data['efermi']
else:
print("WARNING! Fermi energy is estimated from the number of electrons. \
For metallic systems, this may be wrong")
# Take the HOMO as the E Fermi
band = data['eigenvals']
nelec = int(data['nelec'])
if band.shape[0] == 1:
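                # Spin-unpolarised case: each band holds two electrons, so nelec//2
                # bands are occupied; the HOMO is their maximum over all k-points.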
npairs = nelec//2
efermi = band[0,:,:npairs].max()
else:
nspin, nkpts, nband = band.shape
band = np.sort(band.flatten())
nelecs = nelec * nkpts
efermi = band[:nelecs].max()
return efermi
def get_band(self, filename=None):
'''
if filename is not specified, band structure is obtained by searching for:
(1) self.prefix + ".band.out"
(2) self.prefix + ".scf.out"
'''
if filename is None:
if check_exist(self.prefix + ".band.out"):
filename = self.prefix + ".band.out"
elif check_exist(self.prefix + ".scf.out"):
filename = self.prefix + ".scf.out"
else:
assert 0, "Cannot find any band structure file"
# Collecting data from pw_output
data = qe_io.read_pw_output(filename)
band = data['eigenvals']
kpts = data['kpts'][:,:3] # the last column is the kpt weight
alat = data['alat'] * const.AUTOA # in angstrom
lattice = data['crystal axes'] * alat
recip_lattice = data['reciprocal axes'] * 2*np.pi / alat
# Find absolute kpts
abs_kpts = kpts.dot(recip_lattice) # From fractional to absolute in A^-1 unit
temp_kpts = np.empty_like(abs_kpts)
temp_kpts[0] = abs_kpts[0]
temp_kpts[1:] = abs_kpts[:-1]
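        # Segment lengths between consecutive k-points; their cumulative sum gives the
        # 1-D coordinate along the k-path used as the band-structure x-axis.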
proj_kpath = np.matrix(np.sqrt(((temp_kpts - abs_kpts)**2).sum(axis=1)).cumsum())
efermi = self.get_efermi(data)
return band, kpts, proj_kpath, recip_lattice, efermi
def get_bandgap(self, filename=None):
'''Get the bandgap'''
band, kpath_frac, proj_kpath, recip_lattice, efermi = self.get_band(filename)
nspin, nkpts, nbands = band.shape
for spin in range(nspin):
print('Spin:', spin)
CBM = None
for bandth in range(nbands):
shifted_band = band[spin,:,bandth] - efermi
                if (shifted_band > 0.0).all():
CBM = band[spin,:, bandth]
VBM = band[spin,:, bandth -1]
break
                elif (shifted_band < 0.0).any() and (shifted_band > 0.0).any():
print("This is a metal")
break
if CBM is not None:
vbm_idx = np.argmax(VBM)
cbm_idx = np.argmin(CBM)
bandgap = CBM[cbm_idx] - VBM[vbm_idx]
direct = False
if vbm_idx == cbm_idx: direct = True
print(' E(VBM) = %7.4f at k = [%6.4f,%6.4f,%6.4f]' % (VBM[vbm_idx],
kpath_frac[vbm_idx,0], kpath_frac[vbm_idx,1], kpath_frac[vbm_idx,2]))
print(' E(CBM) = %7.4f at k = [%6.4f,%6.4f,%6.4f]' % (CBM[cbm_idx],
kpath_frac[cbm_idx,0], kpath_frac[cbm_idx,1], kpath_frac[cbm_idx,2]))
                if direct:
print(' Direct bandgap : %6.3f' % (bandgap))
else:
print(' Indirect bandgap : %6.3f' % (bandgap))
gap1 = CBM[vbm_idx] - VBM[vbm_idx]
gap2 = CBM[cbm_idx] - VBM[cbm_idx]
direct_gap = min(gap1, gap2)
print(' Direct bandgap : %6.3f' % (direct_gap))
def _generate_band(self, filename=None, efermi=None, spin=0, klabel=None):
'''Processing/collecting the band data before the plotting function
klabel : a list of labels and corresponding coordinates for high symmetry k-points
'''
band, kpath_frac, proj_kpath, recip_lattice, efermi_ = self.get_band(filename)
if efermi is None: efermi = efermi_
band = band[spin] - efermi
# Find absolute coordinates for high symmetric kpoints
sym_kpoint_coor = None
if klabel is not None:
klabel, coor_kpts = str_format.format_klabel(klabel)
            assert coor_kpts is not None, "You need to provide the coordinates of the high-symmetry k-points in klabel"
abs_kpts = coor_kpts.dot(recip_lattice)
temp_kpts = np.empty_like(abs_kpts)
temp_kpts[0] = abs_kpts[0]
temp_kpts[1:] = abs_kpts[:-1]
sym_kpoint_coor = np.sqrt(((temp_kpts - abs_kpts)**2).sum(axis=1)).cumsum()
return band, proj_kpath, sym_kpoint_coor, klabel
def _generate_pband(self, filename=None, spin=0, gradient=False, lm='spd'):
'''Processing/collecting the projected band data before the plotting function
Note: In QE, each atom has its own set of atomic orbitals to project onto.
For example, Fe has 1s, 2s, p, and d. O only has s and p
Unlike QE, VASP decomposes the band into a list of atomic orbitals.
            For example, both Fe and O have contributions from s, p, d, ... Hence,
            the projected wavefunction is sparser in VASP than in QE.
proj_wf = [kpts, band, # of orbitals]
Examples for lm:
lm = 'Ni:s ; p ; d' : three groups: (1) s of Ni ; (2) all p orbitals ; (3) all d orbitals
                lm = ['Ni:s,pd', 'O1:p;O2']  : two groups: (1) s,p,d of Ni ; (2) all p orbitals of the 1st O and all orbitals of O2
lm = ['Ni1;O', 'N'] : two groups: (1) the 1st Ni and all the O atoms ; (2) All N atom
            if gradient == True: the user has to provide TWO groups of orbitals,
for example, lm = 'Ni:s ; p' or ['Ni:s,pd', 'O1:p;O2']
'''
if filename is None:
if check_exist(self.prefix + ".projwfc.out"):
filename = self.prefix + ".projwfc.out"
else:
assert 0, "Cannot find any band structure file"
data = qe_io.read_projwfc_output(filename)
species = data['species']
l_list = data['l']
m_list = np.int64(data['m'])
proj_wf = data['projwfc'][spin]
# Create the possible lm list
lm_data = {'0': ['s'], '1':['pz', 'px', 'py'], '2':['dz2', 'dxz', 'dyz', 'dx2-y2', 'dxy']}
lm_list = []
for i, l in enumerate(l_list):
lm_list.append(lm_data[l][m_list[i] - 1])
# Generate pband
formatted_atom, formatted_lm = str_format.general_lm(lm)
if gradient:
assert len(formatted_atom) == 2, "For the gradient plot, you only need to provide two groups of orbitals, for example, lm = 's ; Ni:d' or lm = ['Ni:s', 'O']"
# Calculate total band and remove zero values
total = proj_wf.sum(axis=2)
shape = total.shape
idx_zeros = total.flatten() < 0.0001
total = total.flatten()
total[idx_zeros] = 1.0
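        # (Near-)zero totals are set to 1.0 to avoid division by zero below; the
        # corresponding projected weights are ~0 anyway.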
total = total.reshape(shape)
pband = []
for i, atoms in enumerate(formatted_atom):
proj_val = 0
for j, atom in enumerate(atoms):
# Locate the atom
if atom is None:
idx_atom = np.arange(len(species))
else:
atom_, id = str_format.format_atom(atom)
                    assert atom_ in self.element, "This is wrong: " + atom + ". Check the lm string/list. Atom must be in the element list: " + " ".join(self.element)
available_atom = [(n, atm) for n, atm in enumerate(self.atom) if atm == atom_]
natom = len(available_atom)
if id is not None:
                        assert id <= natom, "This is wrong: " + atom + ". Check the lm string/list. Atom id must be <= " + str(natom) + " for: " + atom_
idx_atom = []
nspecies = species.count(atom_)
nwfc = nspecies // natom
count = 0
for n, atm in enumerate(species):
if atm == atom_:
if id is None:
idx_atom.append(n)
elif count // nwfc == id - 1:
idx_atom.append(n)
count += 1
# Locate the lm
idx_lm = []
for idx in idx_atom:
for each_lm in formatted_lm[i][j]:
if each_lm is None:
idx_lm.append(idx)
elif lm_list[idx] == each_lm:
idx_lm.append(idx)
proj_val += (proj_wf[:,:,idx_lm]).sum(axis=2)
pband.append(proj_val/total)
pband = np.asarray(pband)
if gradient:
pband = pband[0]/(pband.sum(axis=0))
return pband
def _generate_dos(self, prefix=None, efermi=None, spin=0, lm=None):
'''Processing/collecting the DOS data before the plotting function
TDOS dimensions: [spin , [E(eV), tdos(E)]]
spin : spin of DOS.
lm : string or a list of string, e.g. 'Ni:s' or ['Ni:s','C:s,px,pz']
'''
if lm is None:
lm = [atom+':s,p,d' for atom in self.element]
if prefix is None: prefix = self.prefix
tdos_file = prefix + ".dos"
assert check_exist(tdos_file), "Cannot find " + tdos_file
# Compute pDOS
if check_exist(prefix + ".pdos_tot"):
# Get total DOS
total_pdos_data = qe_io.read_pdos_output(prefix + ".pdos_tot")
tdos = total_pdos_data[spin,:,:2]
if efermi is None:
efermi = self.get_efermi()
# Collect the pdos files
data = qe_io.read_projwfc_output(prefix + ".projwfc.out")
species = data['species']
wfc_id = np.int64(data['wfc'])
l_list = data['l']
m_list = np.int64(data['m'])
pdos_data = []
for i, atm in enumerate(self.atom):
idx_atom = [j for j, atom in enumerate(species) if atom == atm]
wfc_idx = np.unique(wfc_id[idx_atom])
for wfc in wfc_idx:
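                    # projwfc.x writes one pdos file per atom and wavefunction, named
                    # prefix.pdos_atm#<i>(<atom>)_wfc#<j>(<l>); probe which
                    # angular-momentum suffix is present.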
filename = prefix + ".pdos_atm#" + str(i + 1) + "(" + atm + ")" + "_wfc#" + str(wfc)
if check_exist(filename + "(s)"):
filename = filename + "(s)"
elif check_exist(filename + "(p)"):
filename = filename + "(p)"
elif check_exist(filename + "(d)"):
filename = filename + "(d)"
lm_pdos_data = qe_io.read_pdos_output(filename)[spin]
pdos_data.append(lm_pdos_data[:,2:])
pdos_data = np.concatenate(pdos_data, axis=1)
# Create the possible lm list
lm_data = {'0': ['s'], '1':['pz', 'px', 'py'], '2':['dz2', 'dxz', 'dyz', 'dx2-y2', 'dxy']}
lm_list = []
for i, l in enumerate(l_list):
lm_list.append(lm_data[l][m_list[i] - 1])
formatted_atom, formatted_lm = str_format.general_lm(lm)
# Compute pDOS
pdos = []
for i, atoms in enumerate(formatted_atom):
proj_val = 0
for j, atom in enumerate(atoms):
# Locate the atom
if atom is None:
idx_atom = np.arange(len(species))
else:
atom_, id = str_format.format_atom(atom)
                        assert atom_ in self.element, "This is wrong: " + atom + ". Check the lm string/list. Atom must be in the element list: " + " ".join(self.element)
available_atom = [(n, atm) for n, atm in enumerate(self.atom) if atm == atom_]
natom = len(available_atom)
if id is not None:
                            assert id <= natom, "This is wrong: " + atom + ". Check the lm string/list. Atom id must be <= " + str(natom) + " for: " + atom_
idx_atom = []
nspecies = species.count(atom_)
nwfc = nspecies // natom
count = 0
for n, atm in enumerate(species):
if atm == atom_:
if id is None:
idx_atom.append(n)
elif count // nwfc == id - 1:
idx_atom.append(n)
count += 1
# Locate the lm
idx_lm = []
for idx in idx_atom:
for each_lm in formatted_lm[i][j]:
if each_lm is None:
idx_lm.append(idx)
elif lm_list[idx] == each_lm:
idx_lm.append(idx)
proj_val += (pdos_data[:,idx_lm]).sum(axis=1)
pdos.append(proj_val)
pdos = np.asarray(pdos).T
else:
# Get total DOS
tdos_data = qe_io.read_tdos_output(tdos_file)
tdos = tdos_data['dos'][spin,:,:2]
if efermi is None:
efermi = tdos_data['efermi']
else:
efermi = 0
pdos = None
# Shift the energy
tdos[:,0] = tdos[:,0] - efermi
return tdos, pdos
def _generate_kdos(self, prefix=None, efermi=None, spin=0, lm=None, klabel=None):
'''Processing/collecting the k-resolved DOS data before the plotting function
The kDOS will be summed over all the lm
kDOS dimensions: [spin , kpts, [E(eV), tdos(E)]]
spin : spin of DOS.
lm : string or a list of string, e.g. 'Ni:s' or ['Ni:s','C:s,px,pz']
'''
if prefix is None: prefix = self.prefix
        assert check_exist(prefix + ".pdos_tot"), "Cannot find " + prefix + ".pdos_tot"
formatted_atom, formatted_lm = str_format.general_lm(lm)
        assert len(formatted_atom) == 1, "For the kDOS plot, you only need to provide one group of orbitals, for example, lm = 'Ni:sp'"
# Get total kDOS
total_kdos_data = qe_io.read_kdos_output(prefix + ".pdos_tot")
tdos = total_kdos_data[spin,:,:,:2]
if efermi is None:
efermi = self.get_efermi()
# Collect the pdos files
projwfc_data = qe_io.read_projwfc_output(prefix + ".projwfc.out")
site = projwfc_data['site']
species = projwfc_data['species']
wfc_id = np.int64(projwfc_data['wfc'])
l_list = projwfc_data['l']
m_list = np.int64(projwfc_data['m'])
pdos_data = []
for i, atm in enumerate(self.atom):
idx_atom = [j for j, atom in enumerate(species) if atom == atm]
wfc_idx = np.unique(wfc_id[idx_atom])
for wfc in wfc_idx:
filename = prefix + ".pdos_atm#" + str(i + 1) + "(" + atm + ")" + "_wfc#" + str(wfc)
if check_exist(filename + "(s)"):
filename = filename + "(s)"
elif check_exist(filename + "(p)"):
filename = filename + "(p)"
elif check_exist(filename + "(d)"):
filename = filename + "(d)"
lm_pdos_data = qe_io.read_kdos_output(filename)[spin]
pdos_data.append(lm_pdos_data[:,:,2:])
pdos_data = np.concatenate(pdos_data, axis=2)
# Create the possible lm list
lm_data = {'0': ['s'], '1':['pz', 'px', 'py'], '2':['dz2', 'dxz', 'dyz', 'dx2-y2', 'dxy']}
lm_list = []
for i, l in enumerate(l_list):
lm_list.append(lm_data[l][m_list[i] - 1])
# Compute kDOS
for i, atoms in enumerate(formatted_atom):
proj_val = 0
for j, atom in enumerate(atoms):
# Locate the atom
if atom is None:
idx_atom = np.arange(len(species))
else:
atom_, id = str_format.format_atom(atom)
                    assert atom_ in self.element, "This is wrong: " + atom + ". Check the lm string/list. Atom must be in the element list: " + " ".join(self.element)
available_atom = [(n, atm) for n, atm in enumerate(self.atom) if atm == atom_]
natom = len(available_atom)
if id is not None:
                        assert id <= natom, "This is wrong: " + atom + ". Check the lm string/list. Atom id must be <= " + str(natom) + " for: " + atom_
idx_atom = []
nspecies = species.count(atom_)
nwfc = nspecies // natom
count = 0
for n, atm in enumerate(species):
if atm == atom_:
if id is None:
idx_atom.append(n)
elif count // nwfc == id - 1:
idx_atom.append(n)
count += 1
# Locate the lm
idx_lm = []
for idx in idx_atom:
for each_lm in formatted_lm[i][j]:
if each_lm is None:
idx_lm.append(idx)
elif lm_list[idx] == each_lm:
idx_lm.append(idx)
proj_val += (pdos_data[:,:,idx_lm]).sum(axis=2)
pdos = proj_val
# Shift the energy
tdos[:,:,0] = tdos[:,:,0] - efermi
# Compute the kpath projected on 1D
kpts = projwfc_data['kpts']
lattice = self.cell[0]
recip_lattice = 2 * np.pi * np.linalg.inv(lattice).T
abs_kpts = kpts.dot(recip_lattice) # From fractional to absolute in A^-1 unit
        temp_kpts = np.empty_like(abs_kpts)
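        # (The source is truncated here.) A sketch of the likely continuation, mirroring
        # the 1-D k-path projection in get_band() above -- an assumption, not original code:
        #     temp_kpts[0] = abs_kpts[0]
        #     temp_kpts[1:] = abs_kpts[:-1]
        #     proj_kpath = np.sqrt(((temp_kpts - abs_kpts)**2).sum(axis=1)).cumsum()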
import numpy as np
from scipy.constants import m_p, c, e
import matplotlib.pyplot as plt
import PyHEADTAIL.particles.generators as generators
from PyHEADTAIL.trackers.transverse_tracking import TransverseMap
from PyHEADTAIL.trackers.detuners import Chromaticity, AmplitudeDetuning
def run():
def track(bunch, map_):
for i in range(n_turns):
for m in map_:
m.track(bunch)
def generate_bunch(n_macroparticles, alpha_x, alpha_y, beta_x, beta_y, alpha_0, Q_s, R):
intensity = 1.05e11
sigma_z = 0.059958
gamma = 3730.26
eta = alpha_0 - 1. / gamma**2
gamma_t = 1. / np.sqrt(alpha_0)
p0 = np.sqrt(gamma**2 - 1) * m_p * c
beta_z = eta * R / Q_s
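        # eta * R / Q_s is the longitudinal beta function of the linearised synchrotron motion.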
epsn_x = 3.75e-6 # [m rad]
epsn_y = 3.75e-6 # [m rad]
epsn_z = 4 * np.pi * sigma_z**2 * p0 / (beta_z * e)
bunch = generators.generate_Gaussian6DTwiss(
macroparticlenumber=n_macroparticles, intensity=intensity, charge=e,
gamma=gamma, mass=m_p, circumference=C,
alpha_x=alpha_x, beta_x=beta_x, epsn_x=epsn_x,
alpha_y=alpha_y, beta_y=beta_y, epsn_y=epsn_y,
beta_z=beta_z, epsn_z=epsn_z)
#print bunch.sigma_z()
return bunch
# Basic parameters.
n_turns = 3
n_segments = 1
n_macroparticles = 10
Q_x = 64.28
Q_y = 59.31
Q_s = 0.0020443
C = 26658.883
R = C / (2.*np.pi)
alpha_x_inj = 0.
alpha_y_inj = 0.
beta_x_inj = 66.0064
beta_y_inj = 71.5376
alpha_0 = 0.0003225
# Parameters for transverse map.
s = np.arange(0, n_segments + 1) * C / n_segments
alpha_x = alpha_x_inj * np.ones(n_segments)
beta_x = beta_x_inj * np.ones(n_segments)
D_x = np.zeros(n_segments)
    alpha_y = alpha_y_inj * np.ones(n_segments)
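    # (The source is truncated here.) Presumably the matching vertical-plane arrays
    # follow -- an assumption mirroring the horizontal plane above:
    #     beta_y = beta_y_inj * np.ones(n_segments)
    #     D_y = np.zeros(n_segments)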
'''
Solver for step index fiber using analytical expression of the mode profile
and solving numerically the analytical dispersion equation.
'''
import time
import numpy as np
from scipy.optimize import root
from scipy.special import jv, kn
from ..modes import Modes
from ..logger import get_logger
logger = get_logger(__name__)
from joblib import Parallel, delayed
def solve_SI(indexProfile, wl, **options):
degenerate_mode = options.get('degenerate_mode','sin')
n_jobs = options.get('n_jobs', -2)
modes = findPropagationConstants(wl,indexProfile)
modes = associateLPModeProfiles(modes,indexProfile,
degenerate_mode=degenerate_mode,
n_jobs=n_jobs)
return modes
def findPropagationConstants(wl,indexProfile, tol=1e-9):
'''
Find the propagation constants of a step index fiber by numerically finding the solution of the
scalar dispersion relation [1]_ [2]_.
Parameters
----------
wl : float
wavelength in microns.
indexProfile: IndexProfile object
object that contains data about the transverse index profile.
Returns
-------
modes : Modes object
Object containing data about the modes.
Note that it does not fill the transverse profiles, only the data about the propagation constants
and the mode numbers.
See Also
--------
associateLPModeProfiles()
Notes
-----
.. [1] <NAME>, "Fundamentals of optical waveguides"
Academic Press,
2006
.. [2] <NAME>, "Modes of step index multimode fibers"
http://wavefrontshaping.net/index.php/component/content/article/68-community/tutorials/multimode-fibers/118-modes-of-step-index-multimode-fibers
'''
lbda = wl
NA = indexProfile.NA
a = indexProfile.a
n1 = indexProfile.n1
logger.info('Finding the propagation constant of step index fiber by numerically solving the dispersion relation.')
t0 = time.time()
    # Based on the scalar dispersion relation, eq. 3.74:
    #   J_m(u) / (u * J_{m-1}(u)) + K_m(w) / (w * K_{m-1}(w)) = 0,  with u**2 + w**2 = v**2
    # The normalized frequency, cf. formula 3.20 of the book
    v = 2 * np.pi / lbda * NA * a
roots = [0]
m = 0
modes = Modes()
interval = np.arange(np.spacing(10),v-np.spacing(10),v*1e-4)
while len(roots):
def root_func(u):
w=np.sqrt(v**2-u**2)
return jv(m,u)/(u*jv(m-1,u))+kn(m,w)/(w*kn(m-1,w))
guesses = np.argwhere(np.abs(np.diff(np.sign(root_func(interval)))))
froot = lambda x0: root(root_func,x0,tol = tol)
sols = map(froot, interval[guesses])
roots = [s.x for s in sols if s.success]
        # remove solutions outside the valid interval, round them, and remove duplicates
roots = np.unique([np.round(r/tol)*tol for r in roots if (r > 0 and r<v)]).tolist()
roots_num = len(roots)
if roots_num:
degeneracy = 1 if m == 0 else 2
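            # m > 0 solutions are doubly degenerate (cos/sin or exp(+-i*m*theta) azimuthal
            # dependence), so every list below is extended with two copies.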
modes.betas.extend([np.sqrt((2*np.pi/lbda*n1)**2-(r/a)**2) for r in roots]*degeneracy)
modes.u.extend(roots*degeneracy)
modes.w.extend([np.sqrt(v**2-r**2) for r in roots]*degeneracy)
modes.number += roots_num*degeneracy
modes.m.extend([m]*roots_num*degeneracy)
modes.l.extend([x+1 for x in range(roots_num)]*degeneracy)
m += 1
logger.info("Found %g modes in %0.2f seconds." % (modes.number,time.time()-t0))
return modes
def calc_mode(modes, idx, degenerate_mode, R, a, TH,
Rlessa, Rgreatera):
m = modes.m[idx]
l = modes.l[idx]
u = modes.u[idx]
w = modes.w[idx]
phase = m * TH
psi = 0
degenerated = False
if (m, l) in zip(modes.m[:idx], modes.l[:idx]):
degenerated = True
# Non-zero transverse component
if degenerate_mode == 'sin':
        # two pi/2-rotated degenerate modes for m < 0
        psi = np.pi/2 if m < 0 else 0
phase_mult = np.cos(phase + psi)
elif degenerate_mode == 'exp':
        # noticeably faster than writing np.exp(1j*phase)
        phase_mult = np.cos(phase) + 1j*np.sin(phase)
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
    # Occlusion colour and segmentation granularity settings to sweep, alongside
    # the explained classes above
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
    # Instances to explain
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
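    # discs_ now enumerates every pair of (lower, upper) bin edges for petal length
    # and width, i.e. all candidate two-feature discretisations tested below.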
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
"""
Main project source file.
"""
from typing import List, Tuple
from actor import Actor
from data_plotting import plot_accumulated_actor_graph, plot_accumulated_edges_graphs
from simulator import Simulator
from graph import RoadGraph
from queue import PriorityQueue
from utils import softmax_travel_times, compute_average_over_time, MultimodalDistribution
from atis import PrevisionAtis, CurrentAtis, AdherenceAtis, Atis
from statistics import SimStats
from ipdb import set_trace
from pprint import pprint
from collections import defaultdict
from tqdm import trange
from functools import partial
import argparse
import numpy as np
import json
import os.path
import networkx as nx
PREVISION_ATIS = 1
REAL_ATIS = 2
ADHERENCE_ATIS = 3
def parse_args():
parser = argparse.ArgumentParser(
description='Systems Modelling and Simulation')
parser.add_argument("-n", "--num_actors", default=500, type=int, metavar="N",
help="number of vehicles/actors to generate per simulation run")
parser.add_argument("-r", "--runs", default=1, type=int, metavar="R", dest="n_runs",
help="number of times to run the simulation for")
parser.add_argument("-thr", "--congestion_threshold", default=0.9, type=float, metavar="THRESH",
help="threshold when to consider a link congested, volume/capacity")
parser.add_argument("-tmax", "--max_run_time", default=48.0, type=float, metavar="MAX_TIME",
dest="max_run_time", help="max time of each simulation run (in hours)")
parser.add_argument("-atis", "--atis_percentage", default=0.0, type=float, metavar="ATIS_P",
help="percentage of vehicles using the ATIS system")
parser.add_argument("-p", "--peak", type=float, nargs=2, action='append',
dest='traffic_peaks', metavar=("TPEAK_MEAN", "TPEAK_STD"),
help="mean and standard deviation of a normal distribution that represents a peak in traffic")
parser.add_argument("-o", "--out_file", type=str, default=os.path.join("src", "results", "default.json"),
dest='save_path', metavar="SAVE_PATH",
help="place to save the result of running the simulations")
parser.add_argument('-ap', '--atis-prevision', dest='used_atis', action='store_const',
const=1, help="ATIS will make use of predictions to estimate the fastest route")
parser.add_argument('-ar', '--atis-real', dest='used_atis', action='store_const',
const=2, help="ATIS will make use of real times to estimate the fastest route")
parser.add_argument('-aa', '--atis-adherence', dest='used_atis', action='store_const',
const=3, help="ATIS will make use of other atis users' data to estimate the fastest route")
parser.set_defaults(used_atis=2)
parser.add_argument("-v", "--verbose", dest='verbose', action="store_true",
help="allow helpful prints to be displayed")
parser.set_defaults(verbose=False)
parser.add_argument("-pl", "--plots", dest='plots', action="store_true",
help="display plots at the end of the simulation regarding the network occupation")
parser.set_defaults(plots=True)
return parser.parse_args()
def print_args(args):
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
pp.pprint(vars(args))
print()
def actor_constructor(use_atis_p: float, graph: RoadGraph, atis: Atis):
"""Calculate possible routes and give each one a probability based on how little time it takes to transverse it"""
possible_routes = graph.get_all_routes()
routes_times = [graph.get_optimal_route_travel_time(r)
for r in possible_routes]
routes_probs = softmax_travel_times(routes_times)
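    # softmax_travel_times is assumed to map travel times to route
    # probabilities that favour faster routes, roughly a softmax over the
    # negated times, e.g. probs = np.exp(-t) / np.sum(np.exp(-t)) for the
    # vector of times t (illustrative only; the actual scaling lives in utils).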
idx = np.random.choice(len(possible_routes), p=routes_probs)
return Actor(
possible_routes[idx],
np.random.choice([atis, None], p=[use_atis_p, 1-use_atis_p]))
def atis_constructor(used_atis: int, use_atis_p: float, num_actors: int, graph: RoadGraph, traffic_dist: MultimodalDistribution, events: PriorityQueue):
# print("Created ATIS")
switcher = {
PREVISION_ATIS: PrevisionAtis(graph, use_atis_p, traffic_dist, num_actors),
REAL_ATIS: CurrentAtis(graph, use_atis_p),
ADHERENCE_ATIS: AdherenceAtis(graph, use_atis_p, events)
}
    atis = switcher.get(used_atis)
    if atis is None:
        raise ValueError("Invalid ATIS type: %s" % used_atis)
    return atis
def stats_constructor(graph: RoadGraph):
# print("Created STATS")
return SimStats(graph)
def statistics_print(sim: Simulator):
"""Print of simulation statistics regarding ATIS and non ATIS users"""
print()
atis_yes, atis_no = [], []
for a in sim.actors:
if a.atis is not None:
atis_yes.append(a.total_travel_time)
else:
atis_no.append(a.total_travel_time)
print("ATIS YES: mean: %f || std: %f" %
          (np.mean(atis_yes), np.std(atis_yes)))
    print("ATIS NO: mean: %f || std: %f" %
          (np.mean(atis_no), np.std(atis_no)))
import numpy as np
import os
import argparse
prefix = './data/'
np.random.seed(0)
def load_data(args):
n_user, n_item, train_data, eval_data, test_data = load_rating(args)
n_entity, n_relation, adj_entity, adj_relation = load_kg(args)
print('data loaded.')
return n_user, n_item, n_entity, n_relation, train_data, eval_data, test_data, adj_entity, adj_relation
def load_rating(args):
print('reading rating file ...')
# reading rating file
rating_file = prefix + args.dataset + '/ratings_final'
if os.path.exists(rating_file + '.npy'):
rating_np = np.load(rating_file + '.npy')
else:
rating_np = np.loadtxt(rating_file + '.txt', dtype=np.int64)
np.save(rating_file + '.npy', rating_np)
n_user = len(set(rating_np[:, 0]))
n_item = len(set(rating_np[:, 1]))
train_data, eval_data, test_data = dataset_split(rating_np, args)
return n_user, n_item, train_data, eval_data, test_data
# return n_item, train_data
def dataset_split(rating_np, args):
print('splitting dataset ...')
# train:eval:test = 6:2:2
eval_ratio = 0.2
test_ratio = 0.2
n_ratings = rating_np.shape[0]
eval_indices = np.random.choice(list(range(n_ratings)), size=int(n_ratings * eval_ratio), replace=False)
left = set(range(n_ratings)) - set(eval_indices)
test_indices = np.random.choice(list(left), size=int(n_ratings * test_ratio), replace=False)
train_indices = list(left - set(test_indices))
if args.ratio < 1:
train_indices = np.random.choice(list(train_indices), size=int(len(train_indices) * args.ratio), replace=False)
train_data = rating_np[train_indices]
eval_data = rating_np[eval_indices]
test_data = rating_np[test_indices]
return train_data, eval_data, test_data
def load_kg(args):
print('reading KG file ...')
# reading kg file
kg_file = prefix + args.dataset + '/kg_final'
if os.path.exists(kg_file + '.npy'):
kg_np = np.load(kg_file + '.npy')
else:
kg_np = np.loadtxt(kg_file + '.txt', dtype=np.int64)
np.save(kg_file + '.npy', kg_np)
n_entity = len(set(kg_np[:, 0]) | set(kg_np[:, 2]))
n_relation = len(set(kg_np[:, 1]))
kg = construct_kg(kg_np)
adj_row, adj_col, adj_relation = construct_adj(args, kg, n_entity)
return n_entity, n_relation, (adj_row, adj_col), adj_relation
def construct_kg(kg_np):
print('constructing knowledge graph ...')
kg = dict()
for triple in kg_np:
head = triple[0]
relation = triple[1] + 1
tail = triple[2]
# treat the KG as an undirected graph
if head not in kg:
kg[head] = []
kg[head].append((tail, relation))
if tail not in kg:
kg[tail] = []
kg[tail].append((head, relation))
return kg
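# The resulting kg maps entity id -> list of (neighbor_entity, relation)
# pairs, with every triple inserted in both directions. For example, a kg_np
# row [0, 2, 5] yields kg[0] -> [(5, 3)] and kg[5] -> [(0, 3)]
# (note the +1 shift applied to relation ids above).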
def construct_adj(args, kg, entity_num):
print('constructing adjacency matrix ...')
# each line of adj_entity stores the sampled neighbor entities for a given entity
# each line of adj_relation stores the corresponding sampled neighbor relations
# adj_entity = np.zeros([entity_num, args.neighbor_sample_size], dtype=np.int64)
# adj_relation = np.zeros([entity_num, args.neighbor_sample_size], dtype=np.int64)
adj_row = []
adj_col = []
adj_relation = []
for entity in range(entity_num):
neighbors = kg[entity]
n_neighbors = len(neighbors)
# if n_neighbors >= args.neighbor_sample_size:
# sampled_indices = np.random.choice(list(range(n_neighbors)), size=args.neighbor_sample_size, replace=False)
# else:
# sampled_indices = np.random.choice(list(range(n_neighbors)), size=args.neighbor_sample_size, replace=True)
#
# adj_entity[entity] = np.array([neighbors[i][0] for i in sampled_indices])
# adj_relation[entity] = np.array([neighbors[i][1] for i in sampled_indices])
adj_row.extend([entity] * n_neighbors)
adj_col.extend([neighbors[i][0] for i in range(n_neighbors)])
adj_relation.extend([neighbors[i][1] for i in range(n_neighbors)])
return np.array(adj_row), np.array(adj_col), np.array(adj_relation)
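# The three arrays form a COO-style edge list: entry i states that entity
# adj_row[i] has neighbor adj_col[i] via relation adj_relation[i].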
#
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument('--dataset', default='movie')
# parser.add_argument('--ratio', default=1)
# parser.add_argument('--neighbor_sample_size', default=8)
# args = parser.parse_args()
# load_kg(args)
# i = 1
def load_kg_ver0(args):
print('reading KG file ...')
# reading kg file
kg_file = prefix + args.dataset + '/kg_final'
if os.path.exists(kg_file + '.npy'):
kg_np = np.load(kg_file + '.npy')
else:
        kg_np = np.loadtxt(kg_file + '.txt', dtype=np.int64)
        np.save(kg_file + '.npy', kg_np)
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.colors as mcolors
import sys
from os.path import dirname, realpath
pypath = dirname(dirname(dirname(realpath(__file__)))) + '/python/'
sys.path.insert(1, pypath)
import utils
from odes import scara_2dbint, scara
# # Read the specification file
dirpath = dirname(realpath(__file__))
spec = "gb2"
specfile = '/' + spec + ".txt"
dba = utils.read_spec_from_txt(dirpath+specfile)
# # Read the controller files
tag = []
pavings = []
ctlr = []
for k in range(dba.n_dba):
w = "/controller_" + spec + "_w" + str(k) + ".h5"
tau, X, U, _, _, p, t, c = utils.read_controller_itvl_from_h5(dirpath+w)
tag.append(t)
pavings.append(p)
ctlr.append(c)
controller = utils.CtlrItvl(U, pavings, tag, ctlr)
model = scara(tau)
# # Compute the percentage of winning set on the state space
winset = pavings[dba.q0][np.argwhere(tag[dba.q0] == 1).squeeze()]
print("\nWinning set coverage:")
wsize = np.sum((winset[:, 1]-winset[:, 0])
* (winset[:, 3]-winset[:, 2])
* (winset[:, 5]-winset[:, 4])
* (winset[:, 7]-winset[:, 6]))
winper = "{:.2%}".format(
wsize/((X[0, 1]-X[0, 0])*(X[1, 1]-X[1, 0])
* (X[2, 1]-X[2, 0])*(X[3, 1]-X[3, 0]))
)
print(winper)
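# The coverage above is the total 4-D volume of the winning boxes divided by
# the volume of the full state space X.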
# # Set up workspace
nG = 2
G = np.zeros(shape=(4, 2, 2))
G[:, :, 0] = np.array([[0.4980, 0.5772], [1.5739, 1.7055],
[-0.1, 0.1], [-0.1, 0.1]])
G[:, :, 1] = np.array([[0.4903, 0.6069], [-0.9363, -0.8363],
[-0.1, 0.1], [-0.1, 0.1]])
A = pavings[dba.q0][np.argwhere(tag[dba.q0] == -1).squeeze(), :]
def get_labels(x, G, A):
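    # Label semantics for the DBA transition: 1/2 = the first two state
    # dimensions lie inside goal box G[:, :, 0] / G[:, :, 1], -1 = the state
    # lies in an avoid interval from A, 0 = everywhere else.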
if(x[0] > G[0, 0, 0] and x[0] < G[0, 1, 0] and
x[1] > G[1, 0, 0] and x[1] < G[1, 1, 0]):
return 1
elif(x[0] > G[0, 0, 1] and x[0] < G[0, 1, 1] and
x[1] > G[1, 0, 1] and x[1] < G[1, 1, 1]):
return 2
elif(utils.index_in_interval_array(x, A).size > 0):
return -1
else:
return 0
# # Simulation
rng = np.random.default_rng()
Tsim = 50
x0 = np.array([0.0, 0.0, 0.0, 0.0])
t = 0
x = x0
q = dba.q0
tsim = []
xsim = []
usim = []
qsim = []
torqsim = []
lsim = []
while(t < Tsim):
x_id = utils.index_in_interval_array(x, pavings[q])
if(x_id < 0):
print("System state ")
print(x)
print(" is not inside the winning set.")
break
if(q < 0):
print("System unsafe.")
break
if(any(ctlr[q][x_id, :])):
uset = np.argwhere(ctlr[q][x_id, :]).squeeze() # get the indices of valid input
else:
print("No valid control input.")
break
if(uset.size > 1):
if(t < tau):
uid = rng.choice(uset, 1) # randomly pick one
else:
uid = uset[np.argmin(np.linalg.norm(U[uset, :]-upre, axis=1))]
else:
uid = int(uset)
u = U[uid, :].squeeze()
upre = u
# Calculate torque
torq = model.compute_torque(np.atleast_2d(u).T, np.atleast_2d(x[2:4]).T,
np.atleast_2d(x[0:2]).T)
# Integrate ode
sol = solve_ivp(scara_2dbint, [0, tau], x, method='RK45', args=(u,))
tt = sol.t[-1]
y = sol.y[:, -1]
# Save trajectories
tsim.append(t)
xsim.append(x)
usim.append(u)
qsim.append(q)
torqsim.append(torq.astype(float).T.squeeze())
lsim.append(get_labels(x, G, A))
# Update state
q = dba.q_prime[q, get_labels(x, G, A)]
x = y
t += tt
xsim = np.asarray(xsim)
usim = np.asarray(usim)
qsim = np.asarray(qsim)
tsim = np.asarray(tsim)
import argparse
import os
from importlib import import_module
from abp.configs import NetworkConfig, ReinforceConfig, EvaluationConfig
from abp.examples.pysc2.tug_of_war.device_handler import get_default_device, to_device, DeviceDataLoader
import torch
import numpy as np
import torch.nn as nn
from abp.adaptives import TransAdaptive
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data.dataloader import DataLoader
from torch.optim import Adam
def pre_process(data, output_indexes, output_shape):
np_data = np.array(data)
data_input = np.stack(np_data[:,0])
data_output = np.stack(np_data[:,1])
nexus_idx = output_indexes
data_output = data_output[:,nexus_idx]
data_x = normalize(data_input, output_indexes, output_shape)
data_y = normalize(data_output, output_indexes, output_shape)
tensor_x = torch.stack([torch.Tensor(i) for i in data_x])
tensor_y = torch.stack([torch.Tensor(i) for i in data_y])
tensor_dataset = torch.utils.data.TensorDataset(tensor_x, tensor_y)
return tensor_dataset
def normalize(np_data, output_indexes, output_shape):
norm_vector_input = np.array([700, 50, 40, 20, 50, 40, 20, 3,
50, 40, 20, 50, 40, 20, 3,
50, 40, 20, 50, 40, 20,
50, 40, 20, 50, 40, 20,
2000, 2000, 2000, 2000, 40])
norm_vector_output = norm_vector_input[output_indexes]
if len(np_data[0]) == output_shape:
return np_data / norm_vector_output
else:
return np_data / norm_vector_input
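# normalize() dispatches on vector length: already-sliced outputs
# (len == output_shape) are scaled by the matching subset norm_vector_output,
# while full 32-feature states use norm_vector_input.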
def calculate_baseline(data, val_indices):
test_data = [data[i] for i in val_indices]
val_set = np.array(test_data)
baseline = np.stack(val_set[:, 0])
idx = [4, 9]
baseline_hp = baseline[:, idx]
bl_next_state_reward = np.stack(val_set[:, 1])
mse_baseline = ((baseline_hp - bl_next_state_reward)**2).mean(axis=None)
print(mse_baseline)
return mse_baseline
def split_data(dataset, val_pct):
# Determine size of validation set
n_val = int(val_pct*dataset)
# Create random permutation of 0 to n-1
idxs = np.random.permutation(dataset)
# Pick first n_val indices for validation set
return idxs[n_val:], idxs[:n_val]
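# Illustrative usage: for 1000 samples,
#   train_idx, val_idx = split_data(1000, val_pct=0.2)
# returns 800 shuffled training indices and 200 validation indices.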
def load_data(tensor_dataset, indices, batch_size):
sampler = SubsetRandomSampler(indices)
data_loaded = DataLoader(tensor_dataset, batch_size, sampler=sampler)
return data_loaded
def run_task(evaluation_config, network_config, reinforce_config):
trans_model = TransAdaptive(name= "TugOfWar2lNexusHealth",
network_config=network_config,
reinforce_config = reinforce_config)
    np.set_printoptions(suppress=True)
# coding: utf-8
import cmath
import numpy as np
from rcwa.common import matmul, redheffer_star_prod, get_input
from rcwa.structure import HomogeneousStructure
from rcwa.source import Source
from rcwa._constants import UNIT_MAT_2D
def save_outputs(R, T):
with open('output.toml', 'w') as fid:
fid.write('[R]\n00 = {:.4f}\n'.format(R))
fid.write('[T]\n00 = {:.4f}\n'.format(T))
fid.write('[R_T]\n00 = {:.4f}\n'.format(R + T))
class TMM():
'''Calculates transmission through a stack of uniform layers'''
def __prepare(self, structure, source):
nr1 = np.sqrt(structure.UR1*structure.ER1)
self.k_inc = source.K0*nr1*\
np.array(([np.sin(source.THETA)*np.cos(source.PHI),
np.sin(source.THETA)*np.sin(source.PHI), np.cos(source.THETA)]))
S_global = np.array(([0, 0, 1, 0], [0, 0, 0, 1],
[1, 0, 0, 0], [0, 1, 0, 0]))
return S_global
def compute(self, structure, source):
S_global = self.__prepare(structure, source)
S_global = self.__compute_layers(structure, source, S_global)
S_global = self.__compute_superstrate(structure, S_global)
S_global = self.__compute_substrate(structure, S_global)
R, T = self.__get_R_T(structure, source, S_global)
return R, T
def __compute_layers(self, structure, source, S_global):
kx, ky = self.k_inc[0], self.k_inc[1]
# take layers into account
for i in range(0, structure.num_layers):
ur = structure.ur_vec[i]
er = structure.er_vec[i]
l = structure.layer_thicknesses_vec[i]
s_layer_mat = self.__calc_s_mat(l, ur, er, kx, ky, source.K0)
S_global = redheffer_star_prod(S_global, s_layer_mat, UNIT_MAT_2D)
return S_global
@staticmethod
def __calc_gap_layer_params(kx, ky):
ur = 1
er = 1
q_mat = np.array(([kx*ky, ur*er+ky*ky], [-(ur*er+kx*kx), -kx*ky]))/ur
v_mat = -1j*q_mat
return v_mat
@staticmethod
def __calc_layer_params(ur, er, kx, ky):
q_mat = np.array(([kx*ky, ur*er-kx*kx], [ky*ky-ur*er, -kx*ky]))/ur
kz = cmath.sqrt(ur*er-kx*kx-ky*ky)
omega_mat = 1j*kz*np.array(([1, 0], [0, 1]))
v_mat = np.matmul(q_mat, np.linalg.inv(omega_mat))
return omega_mat, v_mat
def __calc_s_mat(self, layer_thickness, ur, er, kx, ky, K0):
omegai_mat, vi_mat = self.__calc_layer_params(ur, er, kx, ky)
vg_mat = self.__calc_gap_layer_params(kx, ky)
ai_mat = UNIT_MAT_2D + np.matmul(np.linalg.inv(vi_mat), vg_mat)
bi_mat = UNIT_MAT_2D - np.matmul(np.linalg.inv(vi_mat), vg_mat)
xi_mat = np.diag(np.exp(np.diag(omegai_mat)*K0*layer_thickness))
ai_inv_mat = np.linalg.inv(ai_mat)
di_mat = ai_mat - matmul(xi_mat, bi_mat, ai_inv_mat, xi_mat, bi_mat)
di_inv_mat = np.linalg.inv(di_mat)
s_11_mat = matmul(di_inv_mat, matmul(
xi_mat, bi_mat, ai_inv_mat, xi_mat, ai_mat) - bi_mat)
s_12_mat = matmul(di_inv_mat, xi_mat, ai_mat
- matmul(bi_mat, ai_inv_mat, bi_mat))
# S_12 = S_21, S_11 = S_22
        s_mat = np.concatenate((np.concatenate((s_11_mat, s_12_mat), axis=1),
                                np.concatenate((s_12_mat, s_11_mat), axis=1)))
        return s_mat
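# Minimal usage sketch (constructor arguments assumed, not shown in this file):
#   structure = HomogeneousStructure(...)  # ER1/UR1, layer er/ur/thickness vectors
#   source = Source(...)                   # K0, THETA, PHI
#   R, T = TMM().compute(structure, source)
#   save_outputs(R, T)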
import torch
import random
import numpy as np
import time
class PuzzleN:
def __init__(self, N):
assert N in [3, 8, 15, 24, 35, 48], "N must be valid"
self.N = N
self.rowLength = int((N + 1) ** 0.5)
self.actions = {
"U": torch.tensor([1, 0]),
"R": torch.tensor([0, -1]),
"D": torch.tensor([-1, 0]),
"L": torch.tensor([0, 1]),
}
self.state = self.getSolvedState()
self.solvedState = self.getSolvedState()
self.manDistMat = self.generateManDistMat()
def getSolvedState(self):
state = []
for i in range(self.rowLength):
state.append(
[n + self.rowLength * i for n in range(1, self.rowLength + 1)])
state[-1][-1] = 0
return torch.tensor(state, dtype=torch.uint8)
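    # For N = 8 (a 3x3 board), getSolvedState() returns
    #   [[1, 2, 3],
    #    [4, 5, 6],
    #    [7, 8, 0]]
    # with 0 marking the blank tile.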
def checkIfSolved(self, states):
goals = torch.all(states == self.solvedState, 2)
goals = goals.all(1)
return goals
def checkIfSolvedSingle(self, state):
return torch.equal(state, self.solvedState)
def nextState(self, states, actions):
stateIdxs, missingY, missingX = torch.where(states == 0)
missing = torch.stack((missingY, missingX), 1)
movingSquare = missing + torch.stack(
[self.actions[action] for action in actions]
)
movingSquare = torch.cat((stateIdxs.unsqueeze(1), movingSquare), 1)
missing = torch.cat((stateIdxs.unsqueeze(1), missing), 1)
invalids = missing[
torch.any(
(movingSquare[:, 1:] >= self.rowLength) | (
movingSquare[:, 1:] < 0), 1
)
][:, 0]
missing = missing[
torch.all(
(movingSquare[:, 1:] < self.rowLength) & (
movingSquare[:, 1:] >= 0), 1
)
]
movingSquare = movingSquare[
torch.all(
(movingSquare[:, 1:] < self.rowLength) & (
movingSquare[:, 1:] >= 0), 1
)
]
stateIdxs, missingY, missingX = missing[:,
0], missing[:, 1], missing[:, 2]
movingSquareY, movingSquareX = movingSquare[:, 1], movingSquare[:, 2]
states[stateIdxs, missingY, missingX] = states[
stateIdxs, movingSquareY, movingSquareX
]
states[stateIdxs, movingSquareY, movingSquareX] = 0
return states, stateIdxs, invalids
def doAction(self, action, state=None):
assert action in self.actions
if state is None:
state = self.state
missing = torch.tensor(torch.where(state == 0))
movingSquare = missing + self.actions[action]
if self.validAction(movingSquare):
state[tuple(missing)], state[tuple(movingSquare)], = (
state[tuple(movingSquare)],
0,
)
return state
def validAction(self, movedSquare):
return (
0 <= movedSquare[0] < self.rowLength
and 0 <= movedSquare[1] < self.rowLength
)
def generateScramble(self, noMoves):
state = self.solvedState.clone()
missing = [self.rowLength - 1, self.rowLength - 1]
scramble = []
movesDone = 0
while movesDone < noMoves:
randomMove = random.choice(list(self.actions.values()))
movingSquare = [sum(x) for x in zip(missing, randomMove)]
if self.validAction(movingSquare):
movesDone += 1
state[tuple(missing)], state[tuple(movingSquare)], = (
state[tuple(movingSquare)],
0,
)
missing = movingSquare
return state
def generateScrambles(self, numStates, maxScrambles, minScrambles=0):
states = self.solvedState.repeat(numStates, 1, 1)
scrambleNums = np.random.randint(minScrambles, maxScrambles + 1, numStates)
numMoves = np.zeros(numStates)
        while np.max(numMoves < scrambleNums):
            # Assumed reconstruction of the truncated loop body: move only the
            # states that still need scrambling, one random move per iteration.
            idxs = np.where(numMoves < scrambleNums)[0]
            actions = [random.choice(list(self.actions)) for _ in idxs]
            states[idxs], moved, _ = self.nextState(states[idxs], actions)
            numMoves[idxs[moved.numpy()]] += 1
        return states
import numpy as np
import matplotlib.pyplot as plt
import math
import matplotlib.patches as patches
import matplotlib.animation as animation
import gym
import random
from datetime import date
float_formatter = "{:.2f}".format
np.set_printoptions(formatter={'float_kind':float_formatter})
#%matplotlib qt
from matplotlib.collections import PatchCollection
from matplotlib.offsetbox import TextArea, DrawingArea, OffsetImage, AnnotationBbox
import matplotlib.image as mpimg
class UnbreakableSeaweed(gym.Env):
"""
Description:
        A one-D model of Aplysia californica feeding.
The goal is to ingest the most edible food
Source:
        This environment corresponds to the model of Aplysia feeding presented in
        "Control for Multifunctionality: Bioinspired Control Based on Feeding in
        Aplysia californica".
Observation (7-element):
Type:
Num Observation Min Max
0 x_h 0 1
1 x_g 0 1
2 force_on_object -Inf Inf
3 pressure_grasper -Inf Inf
4 pressure_jaws -Inf Inf
5 edible -1 1
6 grasper_friction_state 0 1
    Actions:
        Type: Discrete(32); each action index decodes to a 5-element binary array
element 0 - B7 state
element 1 - B6/B9/B3 state
element 2 - B 8a/b state
element 3 - B31/B32 state
element 4 - B38 state
Control frequency: 20 Hz
Reward:
Reward is proportional to the amount of seaweed ingested
Episode Termination:
        Episode ends when the step count reaches max_steps_per_iteration. Default: 1000
"""
##properties for visualization
#define the location of the ground plane
x_ground = np.array([[0],[0]])
len_ground_line = 5
#define the location of the force transducer
x_transducer = x_ground + np.array([[8],[0]])
len_transducer_line = 5
#define location and shape of head
x_H = x_ground + np.array([[0],[0]])
x_H_width = 1
x_H_height = 4
#define the extents of the grasper protraction/retraction path
grasper_origin = x_H + np.array([[0],[0]])
grasper_full = grasper_origin + np.array([[1],[0]])
#define the starting position for the bottom of the grasper along this track
x_R = grasper_origin + np.array([[0],[0]])
#specify vectors based on the grasper in the upright position
theta_grasper_initial = math.pi/2
#specify the grasper radius
r_grasper = 1
grasper_offset = 1
#define the positions of the I2 muscle origins
x_I2_Borigin = grasper_origin + np.array([[0],[0]])
x_I2_Aorigin = grasper_origin + np.array([[0],[2*r_grasper]])
#define the position of the hinge origin
x_hinge_origin = grasper_origin + np.array([[0],[0]])
#specify the angle relative to horizontal for each of the attachment points fixed on the grasper surface
theta_s = 0
theta_I2_A = math.pi/6
theta_I2_B = math.pi/6
#plot line representing ground
line_ground =[]
#plot a dot at the origin
dot_ground =[]
#plot line representing force transducer
line_transducer =[]
#plot line representing track
line_grapser_track =[]
#plot line from R to G
line_RG =[]
#plot dot at point R
dot_R =[]
#plot dot at point G
dot_G =[]
#plot dot at point S
dot_S =[]
#plot dot at I2 attachment point A
dot_I2_A =[]
#plot dot at I2 attachment point B
dot_I2_B =[]
#plot dot at I2 attachment point A
dot_I2_Aorigin =[]
#plot dot at I2 attachment point B
dot_I2_Borigin =[]
#draw grasper
draw_circle =[]
#draw head
head =[]
dot_H_spring =[]
#draw head spring as dashed line
line_H_spring =[]
#draw grasper to head spring as dashed line
line_G_spring =[]
preset_inputs = 0
generat_plots_toggle = 0
init_reward = 0.0
init_force_level = 'low'
high_threshold = 4
low_threshold = 40
output_expert_mean = np.load('output_expert_mean.npy')
output_expert_std = np.load('output_expert_std.npy')
def __init__(self, foo=0, max_steps=1000, threshold=-1000.0, delay=1, patience = 20, cr_threshold = -1000):
self.output_expert_mean = np.load('output_expert_mean.npy')
self.output_expert_std = np.load('output_expert_std.npy')
self.biomechanicsModel = 1
self.generat_plots_toggle = 0
self.verbose = 0
self.cr_threshold = cr_threshold
self.patience = patience
self.delta_gm = 0
self.idle_count = 0
self.gfs = 0
self.foo = foo
self.threshold = threshold
self.total_reward = 0
self.total_reward_log = [self.total_reward]
self.reward_range = (-1e6, 1e6)
self.P_I4 = 0
self.A_I4 = 0.05
self.P_I3_anterior = 0
self.A_I3_anterior = 0.05
self.T_I3 = 0.05
self.A_I3 = 0.05
self.T_I2 = 0.05
self.A_I2 = 0.05
self.T_hinge = 0
self.A_hinge = 0.05
self.x_h = 0.0
self.x_g = 0.0
self.force_on_object = 0
#Friction coefficients
self.mu_s_g = 0.4 #mu_s coefficient of static friction at grasper
self.mu_k_g = 0.3 #mu_k coefficient of kinetic friction at grasper
self.mu_s_h = 0.3 #mu_s coefficient of static friction at jaws
self.mu_k_h = 0.3 #mu_k coefficient of kinetic friction at jaws
#Maximum muscle forces
self.max_I4 = 1.75 #Maximum pressure grasper can exert on food
self.max_I3ant = 0.6 #Maximum I3 anterior force
self.max_I3 = 1 #Maximum I3 force
self.max_I2 = 1.5 #Maximum I2 force
self.max_hinge = 0.2 #Maximum hinge force
#Muscle time constants
self.tau_I4 = 1.0/np.sqrt(2) #time constant (in seconds) for I4 activation
self.tau_I3anterior = 2.0/np.sqrt(2) #time constant (in seconds) for I3anterior activation
self.tau_I2_ingestion = 0.5*1/np.sqrt(2) #time constant (in seconds) for I2 activation during ingestion
self.tau_I2_egestion = 1.4*1/np.sqrt(2) #time constant (in seconds) for I2 activation during egestion
self.tau_I3 = 1.0/np.sqrt(2) #time constant (in seconds) for I3 activation
self.tau_hinge = 1.0/np.sqrt(2) #time constant (in seconds) for hinge activation
self.TimeStep_h = 0.05
#body time constants
self.c_g = 1.0 #time constant (in seconds) for grapser motion
self.c_h = 1.0 #time constant (in seconds) for body motion
#Spring constants
self.K_sp_h = 2.0 #spring constant representing neck and body between head and ground
self.K_sp_g = 0.1 #spring constant representing attachment between buccal mass and head
#Reference points for springs
self.x_h_ref = 0.0 #head spring reference position
self.x_gh_ref = 0.4 #grasper spring reference position
self.seaweed_strength = 100
self.x_g_threshold = 1
self.x_h_threshold = 1
self.sens_mechanical_grasper = 1
high = np.array([self.x_h_threshold*2,
self.x_g_threshold*2,
np.finfo(np.float32).max,
np.finfo(np.float32).max,
np.finfo(np.float32).max,
1,1],
dtype=np.float32)
self.action_space = gym.spaces.Discrete(32)
self.observation_space = gym.spaces.Box(low = -high,
high = high,
dtype = np.float32)
self._state = np.array([0,0,0,0,0,1,0])
self._episode_ended = False
self.steps_beyond_done = None
self.max_steps = max_steps
self.current_step = 1
self.sens_mechanical_grasper_history = np.zeros((1,self.max_steps+1))
self.sens_chemical_lips_history = np.zeros((1,self.max_steps+1))
self.sens_mechanical_lips_history = np.zeros((1,self.max_steps+1))
self.CBI2_history = np.zeros((1,self.max_steps+1))
self.CBI3_history = np.zeros((1,self.max_steps+1))
self.CBI4_history = np.zeros((1,self.max_steps+1))
self.B64_history = np.zeros((1,self.max_steps+1))
self.B20_history = np.zeros((1,self.max_steps+1))
self.B40B30_history = np.zeros((1,self.max_steps+1))
self.B4B5_history = np.zeros((1,self.max_steps+1))
self.x_g_history=np.zeros((1,self.max_steps+1))
self.x_h_history=np.zeros((1,self.max_steps+1))
self.force_history=np.zeros((1,self.max_steps+1))
self.grasper_friction_state_history=np.zeros((1,self.max_steps+1))
self.B8_history=np.zeros((1,self.max_steps+1))
self.B38_history=np.zeros((1,self.max_steps+1))
self.B7_history=np.zeros((1,self.max_steps+1))
self.B31B32_history=np.zeros((1,self.max_steps+1))
self.B6B9B3_history=np.zeros((1,self.max_steps+1))
self.P_I4_history=np.zeros((1,self.max_steps+1))
self.P_I3_anterior_history=np.zeros((1,self.max_steps+1))
self.T_I3_history=np.zeros((1,self.max_steps+1))
self.T_I2_history=np.zeros((1,self.max_steps+1))
self.T_hinge_history=np.zeros((1,self.max_steps+1))
self.theta_g=np.zeros((1,self.max_steps+1)) # ok
self.x_g_history[0,0] = self.x_g
self.x_h_history[0,0] = self.x_h
self.force_history[0,0]=0
self.grasper_friction_state_history[0,0] = 0
self.theta_g[0,0] = 0
self.StartingTime = 0
self.TimeStep = 0.05
self.EndTime = self.max_steps*0.05
def set_plotting(self,toggle):
self.generat_plots_toggle = toggle
def set_verbose(self, inp):
self.verbose = inp
def step(self,action):
term_stat = -1 # 0: reach max_steps. 1: out of bound
if self._episode_ended:
return self.reset()
if self.current_step == self.max_steps:
term_stat = 0
if self.verbose == 1: print('reset - current_step == self.max_steps')
self._episode_ended = True
elif (self.total_reward < self.cr_threshold) or (self.total_reward < self.threshold and self.current_step > 6*20):
if self.verbose == 1: print('reset early stop- total_reward={}@step {}'.format(self.total_reward, self.current_step))
self._episode_ended = True
        elif self.idle_count >= 100: # 100 steps = 5 s at 20 steps/s --> early stop if idle that long
if self.verbose == 1: print('reset early stop- idle too long:{} steps'.format(self.idle_count))
self._episode_ended = True
elif self.x_h < -0.2 or self.x_h > 1.2 or self.x_g < -0.2 or self.x_g > 1.2:
term_stat = 1
if self.verbose == 1: print('reset - x_h or x_g out!: x_h: {} x_g: {}'.format(self.x_h, self.x_g))
self._episode_ended = True
else:
# obtain current x_h, x_g, force_on_object
[x_h, x_g, force_on_object, pressure_grasper, pressure_jaws, edible, grasper_friction_state] = self._state
if edible == 1:
self.fixation_type = 1
else:
self.fixation_type = 0
tmp = self.Biomechanics_001()
reward = tmp * 100
self.MuscleActivations_001(action)
self._state = np.array([self.x_h, self.x_g, self.force_on_object,self.P_I4, self.P_I3_anterior,edible, self.grasper_friction_state],dtype=np.float32)
self.current_step += 1
self.x_g_history[0,self.current_step] = self.x_g
self.x_h_history[0,self.current_step] = self.x_h
self.force_history[0,self.current_step] = self.force_on_object
self.grasper_friction_state_history[0,self.current_step] = self.grasper_friction_state
if self.grasper_friction_state == self.gfs and self.delta_gm == 0:
self.idle_count += 1
else:
self.idle_count = 0
self.gfs = self.grasper_friction_state
if self.idle_count >= self.patience:
reward -= 1
self.B8_history[0,self.current_step] = self.B8
self.B38_history[0,self.current_step] = self.B38
self.B6B9B3_history[0,self.current_step] = self.B6B9B3
self.B31B32_history[0,self.current_step] = self.B31B32
self.B7_history[0,self.current_step] = self.B7
#history
self.P_I4_history[0,self.current_step] = self.P_I4
self.P_I3_anterior_history[0,self.current_step] = self.P_I3_anterior
self.T_I3_history[0,self.current_step] =self.T_I3
self.T_I2_history[0,self.current_step] = self.T_I2
self.T_hinge_history[0,self.current_step] =self.T_hinge
self.sens_mechanical_grasper_history[0,self.current_step] = math.nan
self.sens_chemical_lips_history[0,self.current_step] = math.nan
self.sens_mechanical_lips_history[0,self.current_step] = math.nan
self.CBI2_history[0,self.current_step] = math.nan
self.CBI3_history[0,self.current_step] = math.nan
self.CBI4_history[0,self.current_step] = math.nan
self.B64_history[0,self.current_step] = math.nan
self.B20_history[0,self.current_step] = math.nan
self.B40B30_history[0,self.current_step] = math.nan
self.B4B5_history[0,self.current_step] = math.nan
if self._episode_ended:
reward = 0.0
if self.generat_plots_toggle == 1:
self.GeneratePlots('Plot_'+str(date.today()))
elif self.generat_plots_toggle == 2:
self.GeneratePlots_training('Plot_'+str(date.today()))
self.total_reward += reward
self.total_reward_log.append(self.total_reward)
return self._state, reward, True, {}
else:
self.total_reward += reward
self.total_reward_log.append(self.total_reward)
return self._state, reward, False, {}
def reset(self):
lb, ub = 0.0, 0.05
self.x_h = random.uniform(lb, ub)
self.x_g = random.uniform(lb, ub)
self.force_on_object =random.uniform(lb, ub)
pressure_grasper = random.uniform(lb, ub)
pressure_jaws = random.uniform(lb, ub)
edible = 1
self._state = np.array([self.x_h, self.x_g, self.force_on_object,pressure_grasper, pressure_jaws,edible,0],dtype=np.float32)
self._episode_ended = False
self.current_step = 1
self.total_reward = 0
self.total_reward_log = [self.total_reward]
self.gfs = 0
self.idle_count = 0
self.delta_gm = 0
self.sens_mechanical_grasper_history = np.zeros((1,self.max_steps+1))
self.sens_chemical_lips_history = np.zeros((1,self.max_steps+1))
self.sens_mechanical_lips_history = np.zeros((1,self.max_steps+1))
self.CBI2_history = np.zeros((1,self.max_steps+1))
self.CBI3_history = np.zeros((1,self.max_steps+1))
self.CBI4_history = np.zeros((1,self.max_steps+1))
self.B64_history = np.zeros((1,self.max_steps+1))
self.B20_history = np.zeros((1,self.max_steps+1))
self.B40B30_history = np.zeros((1,self.max_steps+1))
self.B4B5_history = np.zeros((1,self.max_steps+1))
self.x_g_history=np.zeros((1,self.max_steps+1))
self.x_h_history=np.zeros((1,self.max_steps+1))
self.force_history=np.zeros((1,self.max_steps+1))
self.grasper_friction_state_history=np.zeros((1,self.max_steps+1))
self.B8_history=np.zeros((1,self.max_steps+1))
self.B38_history=np.zeros((1,self.max_steps+1))
self.B7_history=np.zeros((1,self.max_steps+1))
self.B31B32_history=np.zeros((1,self.max_steps+1))
self.B6B9B3_history=np.zeros((1,self.max_steps+1))
self.P_I4_history=np.zeros((1,self.max_steps+1))
self.P_I3_anterior_history=np.zeros((1,self.max_steps+1))
self.T_I3_history=np.zeros((1,self.max_steps+1))
self.T_I2_history=np.zeros((1,self.max_steps+1))
self.T_hinge_history=np.zeros((1,self.max_steps+1))
self.x_g_history[0,0] = self.x_g
self.x_h_history[0,0] = self.x_h
self.force_history[0,0]=0
self.grasper_friction_state_history[0,0] = 0
self.StartingTime = 0
self.TimeStep = 0.05
self.EndTime = self.max_steps*0.05
return self._state
def to_binary(self, num):
tmp = np.binary_repr(num) # e.g., '11001'
if len(tmp) < 5:
tmp = '0'* (5-len(tmp)) + tmp
return [int(i) for i in tmp]
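    # e.g. to_binary(19) == [1, 0, 0, 1, 1], since np.binary_repr(19) -> '10011'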
def MuscleActivations_001(self,action):
if isinstance(action, (int, np.integer)):
action = self.to_binary(action)
if self.preset_inputs == 1:
self.B8 = action[2]
self.B38 = action[4]
self.B6B9B3 = action[1]
self.B31B32 = action[3]
self.B7 = action[0]
        #the following code works with the python environment without a tf wrapper
elif isinstance(action, list):
self.B8 = action[2]
self.B38 = action[4]
self.B6B9B3 = action[1]
self.B31B32 = action[3]
self.B7 = action[0]
else:
self.B8 = action[0,2]
self.B38 = action[0,4]
self.B6B9B3 = action[0,1]
self.B31B32 = action[0,3]
self.B7 = action[0,0]
edible = self._state[5]
## Update I4: If food present, and grasper closed, then approaches
# pmax pressure as dp/dt=(B8*pmax-p)/tau_p. Use a quasi-backward-Euler
self.P_I4=((self.tau_I4*self.P_I4+self.A_I4*self.TimeStep_h)/(self.tau_I4+self.TimeStep_h))#old -- keep this version
self.A_I4=((self.tau_I4*self.A_I4+self.B8*self.TimeStep_h)/(self.tau_I4+self.TimeStep_h))
## Update pinch force: If food present, and grasper closed, then approaches
# pmax pressure as dp/dt=(B8*pmax-p)/tau_p. Use a quasi-backward-Euler
self.P_I3_anterior=(self.tau_I3anterior*self.P_I3_anterior+self.A_I3_anterior*self.TimeStep_h)/(self.tau_I3anterior+self.TimeStep_h)
self.A_I3_anterior=(self.tau_I3anterior*self.A_I3_anterior+(self.B38+self.B6B9B3)*self.TimeStep_h)/(self.tau_I3anterior+self.TimeStep_h)
## Update I3 (retractor) activation: dm/dt=(B6-m)/tau_m
self.T_I3=(self.tau_I3*self.T_I3+self.TimeStep_h*self.A_I3)/(self.tau_I3+self.TimeStep_h)
self.A_I3=(self.tau_I3*self.A_I3+self.TimeStep_h*self.B6B9B3)/(self.tau_I3+self.TimeStep_h)
## Update I2 (protractor) activation: dm/dt=(B31-m)/tau_m. quasi-B-Eul.
self.T_I2=((self.tau_I2_ingestion*edible+self.tau_I2_egestion*(1-edible))*self.T_I2+self.TimeStep_h*self.A_I2)/((self.tau_I2_ingestion*edible+self.tau_I2_egestion*(1-edible))+self.TimeStep_h)
self.A_I2=((self.tau_I2_ingestion*edible+self.tau_I2_egestion*(1-edible))*self.A_I2+self.TimeStep_h*self.B31B32)/((self.tau_I2_ingestion*edible+self.tau_I2_egestion*(1-edible))+self.TimeStep_h)
## Update Hinge activation: dm/dt=(B7-m)/tau_m. quasi-B-Eul.
#bvec(12,j+1)=(tau_m*hinge_last+dt*B7_last)/(tau_m+dt)#old
self.T_hinge=(self.tau_hinge*self.T_hinge+self.TimeStep_h*self.A_hinge)/(self.tau_hinge+self.TimeStep_h)#new
self.A_hinge=(self.tau_hinge*self.A_hinge+self.TimeStep_h*self.B7)/(self.tau_hinge+self.TimeStep_h)
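        # Each update above is a backward-Euler step of dT/dt = (A - T)/tau:
        #   T_new = (tau*T + dt*A) / (tau + dt),
        # which stays stable for any choice of TimeStep_h.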
def Biomechanics_001(self):
edible = self._state[5]
old_gm = np.array([self.x_g - self.x_h])
## Biomechanics
unbroken = 1 #tracking variable to keep track of seaweed being broken off during feeding
x_gh = self.x_g-self.x_h
## Grasper Forces
#all forces in form F = Ax+b
x_vec = np.array([[self.x_h],[self.x_g]])
F_I2 = self.max_I2*self.T_I2*np.dot(np.array([1,-1]),x_vec) + self.max_I2*self.T_I2*1 #FI2 = FI2_max*T_I2*(1-(xg-xh))
F_I3 = self.max_I3*self.T_I3*np.dot(np.array([-1,1]),x_vec)-self.max_I3*self.T_I3*0 #FI3 = FI3_max*T_I3*((xg-xh)-0)
F_hinge = (x_gh>0.5)*self.max_hinge*self.T_hinge*np.dot(np.array([-1,1]),x_vec)-(x_gh>0.5)*self.max_hinge*self.T_hinge*0.5 #F_hinge = [hinge stretched]*F_hinge_Max*T_hinge*((xg-xh)-0.5)
F_sp_g = self.K_sp_g*np.dot(np.array([1,-1]),x_vec)+self.K_sp_g*self.x_gh_ref #F_sp,g = K_g((xghref-(xg-xh))
F_I4 = self.max_I4*self.P_I4
F_I3_ant = (self.max_I3ant*self.P_I3_anterior*np.dot(np.array([1,-1]),x_vec)+self.max_I3ant*
self.P_I3_anterior*1)#: pinch force
#calculate F_f for grasper
if(self.fixation_type == 0): #object is not fixed to a contrained surface
#F_g = F_I2+F_sp_g-F_I3-F_hinge #if the object is unconstrained it does not apply a resistive force back on the grasper. Therefore the force is just due to the muscles
A2 = (1/self.c_g*(self.max_I2*self.T_I2*np.array([1,-1])+self.K_sp_g*np.array([1,-1])
-self.max_I3*self.T_I3*np.array([-1,1])-self.max_hinge*self.T_hinge*
(x_gh>0.5)*np.array([-1,1])))
B2 = (1/self.c_g*(self.max_I2*self.T_I2*1+self.K_sp_g*self.x_gh_ref+self.max_I3*self.T_I3*
0+(x_gh>0.5)*self.max_hinge*self.T_hinge*0.5))
A21 = A2[0]
A22 = A2[1]
#the force on the object is approximated based on the friction
if(abs(F_I2+F_sp_g-F_I3-F_hinge) <= abs(self.mu_s_g*F_I4)): # static friction is true
F_f_g = -self.sens_mechanical_grasper*(F_I2+F_sp_g-F_I3-F_hinge)
self.grasper_friction_state = 1
else:
F_f_g = self.sens_mechanical_grasper*self.mu_k_g*F_I4
#specify sign of friction force
F_f_g = -(F_I2+F_sp_g-F_I3-F_hinge)/abs(F_I2+F_sp_g-F_I3-F_hinge)*F_f_g
self.grasper_friction_state = 0
elif (self.fixation_type == 1): #object is fixed to a contrained surface
if unbroken:
if(abs(F_I2+F_sp_g-F_I3-F_hinge) <= abs(self.mu_s_g*F_I4)): # static friction is true
F_f_g = -self.sens_mechanical_grasper*(F_I2+F_sp_g-F_I3-F_hinge)
#F_g = F_I2+F_sp_g-F_I3-F_hinge + F_f_g
self.grasper_friction_state = 1
#identify matrix components for semi-implicit integration
A21 = 0
A22 = 0
B2 = 0
else:
F_f_g = -np.sign(F_I2+F_sp_g-F_I3-F_hinge)[0]*self.sens_mechanical_grasper*self.mu_k_g*F_I4
#specify sign of friction force
#F_g = F_I2+F_sp_g-F_I3-F_hinge + F_f_g
self.grasper_friction_state = 0
#identify matrix components for semi-implicit integration
A2 = (1/self.c_g*(self.max_I2*self.T_I2*np.array([1,-1])+self.K_sp_g*np.array([1,-1])
-self.max_I3*self.T_I3*np.array([-1,1])-self.max_hinge*self.T_hinge*
(x_gh>0.5)*np.array([-1,1])))
B2 = (1/self.c_g*(self.max_I2*self.T_I2*1+self.K_sp_g*self.x_gh_ref+self.max_I3*self.T_I3
*0+(x_gh>0.5)*self.max_hinge*self.T_hinge*0.5+F_f_g))
A21 = A2[0]
A22 = A2[1]
else:
#F_g = F_I2+F_sp_g-F_I3-F_hinge #if the object is unconstrained it does not apply a resistive force back on the grasper. Therefore the force is just due to the muscles
A2 = (1/self.c_g*(self.max_I2*self.T_I2*np.array([1,-1])+self.K_sp_g*np.array([1,-1])-self.max_I3
*self.T_I3*np.array([-1,1])-self.max_hinge*self.T_hinge*(x_gh>0.5)
*np.array([-1,1])))
B2 = (1/self.c_g*(self.max_I2*self.T_I2*1+self.K_sp_g*self.x_gh_ref+self.max_I3*self.T_I3*
0+(x_gh>0.5)*self.max_hinge*self.T_hinge*0.5))
A21 = A2[0]
A22 = A2[1]
#the force on the object is approximated based on the friction
if(abs(F_I2+F_sp_g-F_I3-F_hinge) <= abs(self.mu_s_g*F_I4)): # static friction is true
F_f_g = -self.sens_mechanical_grasper*(F_I2+F_sp_g-F_I3-F_hinge)
self.grasper_friction_state = 1
else:
F_f_g = self.sens_mechanical_grasper*self.mu_k_g*F_I4
#specify sign of friction force
F_f_g = -(F_I2+F_sp_g-F_I3-F_hinge)/abs(F_I2+F_sp_g-F_I3-F_hinge)*F_f_g
self.grasper_friction_state = 0
#[j*dt position_grasper_relative I2 F_sp I3 hinge GrapserPressure_last F_g]
## Body Forces
#all forces in the form F = Ax+b
F_sp_h = self.K_sp_h*np.dot(np.array([-1,0]),x_vec)+self.x_h_ref*self.K_sp_h
#all muscle forces are equal and opposite
if(self.fixation_type == 0): #object is not constrained
#F_h = F_sp_h #If the object is unconstrained it does not apply a force back on the head. Therefore the force is just due to the head spring.
A1 = 1/self.c_h*self.K_sp_h*np.array([-1,0])
B1 = 1/self.c_h*self.x_h_ref*self.K_sp_h
A11 = A1[0]
A12 = A1[1]
if(abs(F_sp_h+F_f_g) <= abs(self.mu_s_h*F_I3_ant)): # static friction is true
F_f_h = -self.sens_mechanical_grasper*(F_sp_h+F_f_g) #only calculate the force if an object is actually present
self.jaw_friction_state = 1
else:
F_f_h = -np.sign(F_sp_h+F_f_g)[0]*self.sens_mechanical_grasper*self.mu_k_h*F_I3_ant #only calculate the force if an object is actually present
self.jaw_friction_state = 0
elif (self.fixation_type == 1):
#calcuate friction due to jaws
if unbroken: #if the seaweed is intact
if(abs(F_sp_h+F_f_g) <= abs(self.mu_s_h*F_I3_ant)): # static friction is true
F_f_h = -self.sens_mechanical_grasper*(F_sp_h+F_f_g) #only calculate the force if an object is actually present
#F_h = F_sp_h+F_f_g + F_f_h
self.jaw_friction_state = 1
A11 = 0
A12 = 0
B1 = 0
else:
F_f_h = -np.sign(F_sp_h+F_f_g)[0]*self.sens_mechanical_grasper*self.mu_k_h*F_I3_ant #only calculate the force if an object is actually present
#F_h = F_sp_h+F_f_g + F_f_h
self.jaw_friction_state = 0
if (self.grasper_friction_state == 1): #object is fixed and grasper is static
# F_f_g = -mechanical_in_grasper*(F_I2+F_sp_g-F_I3-F_Hi)
A1 = (1/self.c_h*(self.K_sp_h*np.array([-1,0])+(-self.sens_mechanical_grasper*
(self.max_I2*self.T_I2*np.array([1,-1])
+self.K_sp_g*np.array([1,-1])-self.max_I3*
self.T_I3*np.array([-1,1])-self.max_hinge*
self.T_hinge*(x_gh>0.5)*np.array([-1,1]))
-np.sign(F_sp_h+F_f_g)[0]*self.sens_mechanical_grasper*self.mu_k_h*
self.max_I3ant*self.P_I3_anterior
*np.array([1,-1]))))
B1 = (1/self.c_h*(self.x_h_ref*self.K_sp_h+(-self.sens_mechanical_grasper*(self.max_I2*self.T_I2*1+self.K_sp_g*self.x_gh_ref+self.max_I3*self.T_I3*0+(x_gh>0.5)*self.max_hinge*self.T_hinge*0.5))
-np.sign(F_sp_h+F_f_g)[0]*self.sens_mechanical_grasper*self.mu_k_h*self.max_I3ant*self.P_I3_anterior*1))
else: #both are kinetic
#F_f_g = -np.sign(F_I2+F_sp_g-F_I3-F_Hi)*mechanical_in_grasper*mu_k_g*F_I4
A1 = (1/self.c_h*(self.K_sp_h*np.array([-1,0])-np.sign(F_sp_h+F_f_g)[0]
*self.sens_mechanical_grasper*self.mu_k_h*self.max_I3ant*
self.P_I3_anterior*np.array([1,-1])))
B1 = (1/self.c_h*(self.x_h_ref*self.K_sp_h-np.sign(F_I2+F_sp_g-F_I3-F_hinge)[0]*
self.sens_mechanical_grasper*self.mu_k_g*F_I4
-np.sign(F_sp_h+F_f_g)[0]*self.sens_mechanical_grasper*
self.mu_k_h*self.max_I3ant*self.P_I3_anterior*1))
A11 = A1[0]
A12 = A1[1]
else: # if the seaweed is broken the jaws act as if unconstrained
if(abs(F_sp_h+F_f_g) <= abs(self.mu_s_h*F_I3_ant)): # static friction is true
F_f_h = -self.sens_mechanical_grasper*(F_sp_h+F_f_g) #only calculate the force if an object is actually present
self.jaw_friction_state = 1
else:
F_f_h = -np.sign(F_sp_h+F_f_g)[0]*self.sens_mechanical_grasper*self.mu_k_h*F_I3_ant #only calculate the force if an object is actually present
self.jaw_friction_state = 0
                A1 = 1/self.c_h*self.K_sp_h*np.array([-1,0])
B1 = 1/self.c_h*self.x_h_ref*self.K_sp_h
A11 = A1[0]
A12 = A1[1]
self.jaw_friction_state = 0
A = np.array([[A11,A12],[A21,A22]])
B = np.array([[B1],[B2]])
x_last = np.array(x_vec)
x_new = (1/(1-self.TimeStep_h*A.trace()))*(np.dot((np.identity(2)+self.TimeStep_h*
np.array([[-A22,A12],[A21,-A11]])),x_last)+
self.TimeStep_h*B)
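        # Quasi-implicit Euler step for dx/dt = A x + B:
        #   x_new = (I - dt*A)^{-1} (x + dt*B),
        # with the 2x2 adjugate written out explicitly and det(I - dt*A)
        # approximated to first order by 1 - dt*trace(A).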
self.x_g = x_new[1,0]
self.x_h = x_new[0,0]
## calculate force on object
self.force_on_object = F_f_g+F_f_h
#check if seaweed is broken
if (self.fixation_type ==1):
if (self.force_on_object>self.seaweed_strength):
print('seaweed broke')
unbroken = 0
#check to see if a new cycle has started
x_gh_next = self.x_g-self.x_h
if (not unbroken and x_gh <0.3 and x_gh_next>x_gh):
print('start a new cycle')
unbroken = 1
self.force_on_object= unbroken*self.force_on_object
new_gm = np.array([self.x_g - self.x_h]) # grasper motion = self.x_g - self.x_h
delta_gm = new_gm - old_gm
self.delta_gm = delta_gm
if self.grasper_friction_state:
if delta_gm < 0:
reward = - delta_gm * (self.force_on_object ** self.foo)
else:
reward = - delta_gm
if not edible:
reward *= -1
else:
if self.force_on_object < 0 and delta_gm > 0:
reward = self.force_on_object / 100
else:
reward = [0.0]
return reward[0]
def GeneratePlots(self,label):
import math, copy
self.EndTime = (self.current_step - 1) * self.TimeStep
t=np.atleast_2d(np.arange(self.StartingTime,self.EndTime+self.TimeStep,self.TimeStep))
self.EndTime = self.max_steps*0.05
end_ind = t.shape[1]
tmp = [1] * 15
tmp.extend([2,2])
axs = plt.figure(figsize=(10,15), constrained_layout=True).subplots(17,1, sharex=True, gridspec_kw={'height_ratios': tmp})
lineW =2
i= 0
#External Stimuli
ax0 = axs[0]
ax0.plot(t.transpose(),self.sens_mechanical_grasper_history[0,:end_ind].transpose(), color=[56/255, 232/255, 123/255],linewidth=2) #mechanical in grasper
ax0.set_ylabel('Mech. in Grasper')
i=1
ax = axs[i]
ax.plot(t.transpose(),self.sens_chemical_lips_history[0,:end_ind].transpose(), color=[70/255, 84/255, 218/255],linewidth=2) #chemical at lips
ax.set_ylabel('Chem. at Lips')
i=i+1
ax = axs[i]
ax.plot(t.transpose(),self.sens_mechanical_lips_history[0,:end_ind].transpose(), color=[47/255, 195/255, 241/255],linewidth=2) #mechanical at lips
ax.set_ylabel('Mech. at Lips')
i=i+1
ax = axs[i]
ax.plot(t.transpose(),self.CBI2_history[0,:end_ind].transpose(),'k',linewidth=lineW) # CBI2
ax.set_ylabel('CBI-2')
i=i+1
ax = axs[i]
ax.plot(t.transpose(),self.CBI3_history[0,:end_ind].transpose(),'k',linewidth=lineW) # CBI3
ax.set_ylabel('CBI-3')
i=i+1
ax = axs[i]
ax.plot(t.transpose(),self.CBI4_history[0,:end_ind].transpose(),'k',linewidth=lineW) # CBI4
ax.set_ylabel('CBI-4')
i=i+1
#Interneurons
ax = axs[i]
ax.plot(t.transpose(),self.B64_history[0,:end_ind].transpose(),linewidth=lineW, color=[90/255, 131/255, 198/255]) # B64
ax.set_ylabel('B64', color=[90/255, 131/255, 198/255])
i=i+1
ax = axs[i]
ax.plot(t.transpose(),self.B20_history[0,:end_ind].transpose(),linewidth=lineW, color=[44/255, 166/255, 90/255]) # B20
i=i+1;
ax.set_ylabel('B20', color=[44/255, 166/255, 90/255])
ax = axs[i]
ax.plot(t.transpose(),self.B40B30_history[0,:end_ind].transpose(),linewidth=lineW, color=[192/255, 92/255, 185/255]) # B40/B30
i=i+1;
ax.set_ylabel('B40/B30', color=[192/255, 92/255, 185/255])
ax = axs[i]
ax.plot(t.transpose(),self.B4B5_history[0,:end_ind].transpose(),linewidth=lineW, color=[51/255, 185/255, 135/255]) # B4/5
i=i+1;
ax.set_ylabel('B4/B5', color=[51/255, 185/255, 135/255])
#motor neurons
ax = axs[i]
ax.plot(t.transpose(),self.B31B32_history[0,:end_ind].transpose(),linewidth=lineW, color=[220/255, 81/255, 81/255]) # I2 input
i=i+1;
ax.set_ylabel('B31/B32',color=[220/255, 81/255, 81/255])
ax = axs[i]
ax.plot(t.transpose(),self.B8_history[0,:end_ind].transpose(),linewidth=lineW, color=[213/255, 155/255, 196/255]) # B8a/b
i=i+1;
ax.set_ylabel('B8a/b', color=[213/255, 155/255, 196/255])
ax = axs[i]
ax.plot(t.transpose(),self.B38_history[0,:end_ind].transpose(),linewidth=lineW, color=[238/255, 191/255, 70/255]) # B38
i=i+1;
ax.set_ylabel('B38', color=[238/255, 191/255, 70/255])
ax = axs[i]
ax.plot(t.transpose(),self.B6B9B3_history[0,:end_ind].transpose(),linewidth=lineW, color=[90/255, 155/255, 197/255]) # B6/9/3
i=i+1;
ax.set_ylabel('B6/B9/B3', color=[90/255, 155/255, 197/255])
ax = axs[i]
ax.plot(t.transpose(),self.B7_history[0,:end_ind].transpose(),linewidth=lineW, color=[56/255, 167/255, 182/255]) # B7
i=i+1;
ax.set_ylabel('B7', color=[56/255, 167/255, 182/255])
#Grasper Motion plot
grasper_motion = self.x_g_history - self.x_h_history
ax = axs[i]
ax.plot(t.transpose(),grasper_motion[0,:end_ind].transpose(),'b',linewidth=lineW)
# overlay the grasper friction state as thick blue dots
grasper_motion_gfs = copy.deepcopy(grasper_motion) # long
t_gfs = copy.deepcopy(t) # short
grasper_motion_gfs[self.grasper_friction_state_history != 1] = math.nan # long
t_gfs[0, self.grasper_friction_state_history[0, :end_ind] != 1] = math.nan # short
ax.plot(t_gfs.transpose(), grasper_motion_gfs[0,:end_ind].transpose(),'b', linewidth = lineW * 2)
# overlay b&w bars
gm_delta = np.zeros_like(grasper_motion) # long
t_delta = copy.deepcopy(t) # short
gm_delta[:,1:] = grasper_motion[:,1:] - grasper_motion[:,:-1]
t_delta[0,gm_delta[0, :end_ind] <= 0] = math.nan
gm_delta[gm_delta > 0] = 1.25
gm_delta[gm_delta != 1.25 ] = math.nan
        ax.plot(t.transpose(), 1.25 * np.ones_like(t).transpose())  # assumed: y transposed to match x
# -*- coding: utf-8 -*-
"""
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Apr 14, 2015
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
from cuda4py.blas import CUBLAS_OP_N, CUBLAS_OP_T
import gc
import numpy
from veles.config import root
from veles.memory import Array
from veles.ocl_blas import OCLBLAS
from veles.tests import AcceleratedTest, assign_backend
import veles.prng as prng
class TestOCLBLASBase(AcceleratedTest):
ABSTRACT = True
def test_veles_blas(self):
for enabled in (True, False):
root.common.engine.ocl.clBLAS = enabled
blas = OCLBLAS(self.device)
for _ in range(2):
self._test_random(blas, 17, 1999, 231)
gc.collect()
self._test_random(blas, 7, 9, 8)
gc.collect()
self._test_random(blas, 9, 7, 800)
gc.collect()
self._test_random(blas, 1, 1, 1)
gc.collect()
self._test_random(blas, 7777, 17, 219)
gc.collect()
self._test_random(blas, 1777, 1999, 2119)
gc.collect()
del blas
gc.collect()
def _test_random(self, blas, a_size, b_size, common_size):
rnd = prng.RandomGenerator(None)
rnd.seed(123)
a = Array(numpy.zeros([a_size, common_size], dtype=self.dtype))
b = Array(numpy.zeros([b_size, common_size], dtype=self.dtype))
c = Array(numpy.zeros([a_size, b_size], dtype=self.dtype))
rnd.fill(a.mem)
rnd.fill(b.mem)
c_gold = numpy.dot(a.mem, b.mem.transpose()).transpose().ravel()
at = a.mem.reshape(tuple(reversed(a.shape))).transpose()
bt = b.mem.reshape(tuple(reversed(b.shape))).transpose()
c_gold_t = numpy.dot(at, bt.transpose()).transpose().ravel()
a.initialize(self.device)
b.initialize(self.device)
c.initialize(self.device)
alpha = numpy.ones(1, dtype=self.dtype)
beta = numpy.zeros(1, dtype=self.dtype)
gemm = OCLBLAS.gemm(self.dtype)
gemm(blas, CUBLAS_OP_T, CUBLAS_OP_N,
a_size, b_size, common_size,
alpha, a.devmem, b.devmem, beta, c.devmem)
c.map_read()
        max_diff = numpy.fabs(c.plain - c_gold)
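        # A typical follow-up check (assumed): self.assertLess(max_diff.max(), 1e-4)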
# Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
import pytest
import numpy as np
import numpy.ma as ma
import pandas as pd
import scipy as sp
import math
from itertools import repeat, chain
from ..bin import *
from ..bin import _process_column_initial, _encode_categorical_existing, _process_continuous
class StringHolder:
def __init__(self, internal_str):
self.internal_str = internal_str
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedStringHolder(StringHolder):
def __init__(self, internal_str):
StringHolder.__init__(self, internal_str)
class FloatHolder:
def __init__(self, internal_float):
self.internal_float = internal_float
def __float__(self):
return self.internal_float
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatHolder(FloatHolder):
def __init__(self, internal_float):
FloatHolder.__init__(self, internal_float)
class FloatAndStringHolder:
def __init__(self, internal_float, internal_str):
self.internal_float = internal_float
self.internal_str = internal_str
def __float__(self):
return self.internal_float
def __str__(self):
return self.internal_str
def __lt__(self, other):
return True # make all objects of this type identical to detect sorting failures
def __hash__(self):
return 0 # make all objects of this type identical to detect hashing failures
def __eq__(self,other):
return True # make all objects of this type identical to detect hashing failures
class DerivedFloatAndStringHolder(FloatAndStringHolder):
def __init__(self, internal_float, internal_str):
FloatAndStringHolder.__init__(self, internal_float, internal_str)
class NothingHolder:
# the result of calling str(..) includes the memory address, so they won't be dependable categories
def __init__(self, internal_str):
self.internal_str = internal_str
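# These holder classes pin __lt__, __hash__, and __eq__ so every instance
# compares equal: any code path that sorts or hashes the raw objects instead
# of their str()/float() conversions collapses distinct values and fails a test.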
def check_pandas_normal(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val1, val2], dtype=np.object_), dtype=dtype)
feature_types_given = ['nominal']
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None)], feature_names_in, None))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c1 = {str(val1) : 1, str(val2) : 2}
X_cols = list(unify_columns(X, [(0, c1)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
c2 = {str(val2) : 1, str(val1) : 2}
X_cols = list(unify_columns(X, [(0, c2)], feature_names_in, feature_types_given))
assert(len(X_cols) == 1)
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 2)
assert(X_cols[0][1][0] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val2)])
def check_pandas_missings(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
c1 = {str(val1) : 1, str(val2) : 2}
c2 = {str(val2) : 1, str(val1) : 2}
feature_types_given = ['nominal', 'nominal', 'nominal', 'nominal']
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X, feature_types_given=feature_types_given)
X_cols = list(unify_columns(X, [(0, None), (1, None), (2, None), (3, None)], feature_names_in, None))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(len(X_cols[0][2]) == 2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(len(X_cols[1][2]) == 2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(len(X_cols[2][2]) == 2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(len(X_cols[3][2]) == 2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c1), (2, c1), (3, c1)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c1)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c1)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c2), (1, c2), (2, c2), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c2)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c2)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
X_cols = list(unify_columns(X, [(0, c1), (1, c2), (2, c1), (3, c2)], feature_names_in, feature_types_given))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'nominal')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is c1)
assert(X_cols[0][1].dtype == np.int64)
assert(len(X_cols[0][1]) == 3)
assert(X_cols[0][1][0] == X_cols[0][2][str(val2)])
assert(X_cols[0][1][1] == X_cols[0][2][str(val1)])
assert(X_cols[0][1][2] == X_cols[0][2][str(val1)])
assert(X_cols[1][0] == 'nominal')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is c2)
assert(X_cols[1][1].dtype == np.int64)
assert(len(X_cols[1][1]) == 3)
assert(X_cols[1][1][0] == 0)
assert(X_cols[1][1][1] == X_cols[1][2][str(val2)])
assert(X_cols[1][1][2] == X_cols[1][2][str(val1)])
assert(X_cols[2][0] == 'nominal')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is c1)
assert(X_cols[2][1].dtype == np.int64)
assert(len(X_cols[2][1]) == 3)
assert(X_cols[2][1][0] == X_cols[2][2][str(val1)])
assert(X_cols[2][1][1] == 0)
assert(X_cols[2][1][2] == X_cols[2][2][str(val2)])
assert(X_cols[3][0] == 'nominal')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is c2)
assert(X_cols[3][1].dtype == np.int64)
assert(len(X_cols[3][1]) == 3)
assert(X_cols[3][1][0] == X_cols[3][2][str(val2)])
assert(X_cols[3][1][1] == X_cols[3][2][str(val1)])
assert(X_cols[3][1][2] == 0)
assert(np.array_equal(X_cols[1][1] == 0, X.iloc[:, 1].isna()))
assert(np.array_equal(X_cols[2][1] == 0, X.iloc[:, 2].isna()))
assert(np.array_equal(X_cols[3][1] == 0, X.iloc[:, 3].isna()))
def check_pandas_float(dtype, val1, val2):
X = pd.DataFrame()
X["feature1"] = pd.Series(np.array([val2, val1, val1], dtype=np.object_), dtype=dtype)
X["feature2"] = pd.Series(np.array([None, val2, val1], dtype=np.object_), dtype=dtype)
X["feature3"] = pd.Series(np.array([val1, None, val2], dtype=np.object_), dtype=dtype)
X["feature4"] = pd.Series(np.array([val2, val1, None], dtype=np.object_), dtype=dtype)
X, n_samples = clean_X(X)
assert(n_samples == 3)
feature_names_in = unify_feature_names(X)
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in, min_unique_continuous=0))
assert(4 == len(X_cols))
assert(X_cols[0][0] == 'continuous')
assert(X_cols[0][3] is None)
assert(X_cols[0][2] is None)
assert(X_cols[0][1].dtype == np.float64)
assert(X_cols[0][1][0] == np.float64(dtype(val2)))
assert(X_cols[0][1][1] == np.float64(dtype(val1)))
assert(X_cols[0][1][2] == np.float64(dtype(val1)))
assert(X_cols[1][0] == 'continuous')
assert(X_cols[1][3] is None)
assert(X_cols[1][2] is None)
assert(X_cols[1][1].dtype == np.float64)
assert(np.isnan(X_cols[1][1][0]))
assert(X_cols[1][1][1] == np.float64(dtype(val2)))
assert(X_cols[1][1][2] == np.float64(dtype(val1)))
assert(X_cols[2][0] == 'continuous')
assert(X_cols[2][3] is None)
assert(X_cols[2][2] is None)
assert(X_cols[2][1].dtype == np.float64)
assert(X_cols[2][1][0] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[2][1][1]))
assert(X_cols[2][1][2] == np.float64(dtype(val2)))
assert(X_cols[3][0] == 'continuous')
assert(X_cols[3][3] is None)
assert(X_cols[3][2] is None)
assert(X_cols[3][1].dtype == np.float64)
assert(X_cols[3][1][0] == np.float64(dtype(val2)))
assert(X_cols[3][1][1] == np.float64(dtype(val1)))
assert(np.isnan(X_cols[3][1][2]))
def check_numpy_throws(dtype_src, val1, val2):
X = np.array([[val1, val2], [val1, val2]], dtype=dtype_src)
X, n_samples = clean_X(X)
assert(n_samples == 2)
feature_names_in = unify_feature_names(X)
try:
X_cols = list(unify_columns(X, zip(range(len(feature_names_in)), repeat(None)), feature_names_in))
assert(False)
except:
pass
def test_process_continuous_float64():
vals, bad = _process_continuous(np.array([3.5, 4.5], dtype=np.float64), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([3.5, 4.5], dtype=np.float64)))
def test_process_continuous_float32():
vals, bad = _process_continuous(np.array([3.1, np.nan], dtype=np.float32), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 3.0999999046325684)
assert(np.isnan(vals[1]))
def test_process_continuous_int8():
vals, bad = _process_continuous(np.array([7, -9], dtype=np.int8), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([7, -9], dtype=np.float64)))
def test_process_continuous_uint16_missing():
vals, bad = _process_continuous(np.array([7], dtype=np.uint16), np.array([True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 2)
assert(vals[0] == 7)
assert(np.isnan(vals[1]))
def test_process_continuous_bool():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([0, 1], dtype=np.float64)))
def test_process_continuous_bool_missing():
vals, bad = _process_continuous(np.array([False, True], dtype=np.bool_), np.array([True, False, True], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 0)
assert(np.isnan(vals[1]))
assert(vals[2] == 1)
def test_process_continuous_obj_simple():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5], dtype=np.float64)))
def test_process_continuous_obj_simple_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5")], dtype=np.object_), np.array([True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 6)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(np.isnan(vals[5]))
def test_process_continuous_obj_hard():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), DerivedStringHolder("7.5"), FloatHolder(8.5), DerivedFloatHolder(9.5), FloatAndStringHolder(10.5, "88"), DerivedFloatAndStringHolder(11.5, "99")], dtype=np.object_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5, 3, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5], dtype=np.float64)))
def test_process_continuous_obj_hard_missing():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5")], dtype=np.object_), np.array([True, True, True, True, True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 7)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[6]))
def test_process_continuous_obj_hard_bad():
vals, bad = _process_continuous(np.array([1, 2.5, "3", "4.5", np.float32("5.5"), StringHolder("6.5"), "bad", StringHolder("bad2"), NothingHolder("bad3")], dtype=np.object_), np.array([True, True, True, True, True, True, True, False, True, True], dtype=np.bool_))
assert(len(bad) == 10)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] is None)
assert(bad[3] is None)
assert(bad[4] is None)
assert(bad[5] is None)
assert(bad[6] == "bad")
assert(bad[7] is None)
assert(bad[8] == "bad2")
assert(isinstance(bad[9], str))
assert(vals.dtype == np.float64)
assert(len(vals) == 10)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(vals[2] == 3)
assert(vals[3] == 4.5)
assert(vals[4] == 5.5)
assert(vals[5] == 6.5)
assert(np.isnan(vals[7]))
def test_process_continuous_str_simple():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), None)
assert(bad is None)
assert(vals.dtype == np.float64)
assert(np.array_equal(vals, np.array([1, 2.5], dtype=np.float64)))
def test_process_continuous_str_simple_missing():
vals, bad = _process_continuous(np.array(["1", "2.5"], dtype=np.unicode_), np.array([True, True, False], dtype=np.bool_))
assert(bad is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 3)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[2]))
def test_process_continuous_str_hard_bad():
vals, bad = _process_continuous(np.array(["1", "2.5", "bad"], dtype=np.unicode_), np.array([True, True, True, False], dtype=np.bool_))
assert(len(bad) == 4)
assert(bad[0] is None)
assert(bad[1] is None)
assert(bad[2] == "bad")
assert(bad[3] is None)
assert(vals.dtype == np.float64)
assert(len(vals) == 4)
assert(vals[0] == 1)
assert(vals[1] == 2.5)
assert(np.isnan(vals[3]))
def test_process_column_initial_int_float():
# this test is hard since np.unique seems to think int(4) == float(4.0) so naively it returns just "4"
encoded, c = _process_column_initial(np.array([4, 4.0], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["4"] == 1)
assert(c["4.0"] == 2)
assert(np.array_equal(encoded, np.array([c["4"], c["4.0"]], dtype=np.int64)))
def test_process_column_initial_float32_float64():
    # np.float64(np.float32(0.1)) != np.float64(0.1) since the float32-to-float64 version has the lower mantissa
    # bits all set to zero, and there is another float64 that is closer to "0.1" in float64 representation, so
    # they aren't the same; but if we convert them to strings first then they are identical. Strings are the
    # ultimate arbiter of categorical membership since strings are cross-platform and JSON encodable. np.unique
    # will tend to separate the float32 and the float64 values since they aren't the same, but then serialize
    # them to the same string. Then our model would have ["0.1", "0.1"] as the categories if we didn't convert to float64!
encoded, c = _process_column_initial(np.array([np.float32(0.1), np.float64(0.1)], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["0.1"] == 1)
assert(c["0.10000000149011612"] == 2)
assert(np.array_equal(encoded, np.array([c["0.10000000149011612"], c["0.1"]], dtype=np.int64)))
def test_process_column_initial_obj_obj():
encoded, c = _process_column_initial(np.array([StringHolder("abc"), StringHolder("def")], dtype=np.object_), None, None, None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["def"] == 2)
assert(np.array_equal(encoded, np.array([c["abc"], c["def"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
def test_process_column_initial_alphabetical_missing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), np.array([True, True, False, True], dtype=np.bool_), 'nominal_alphabetical', None)
assert(len(c) == 2)
assert(c["abc"] == 1)
assert(c["xyz"] == 2)
assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], 0, c["xyz"]], dtype=np.int64)))
def test_process_column_initial_prevalence_nomissing():
encoded, c = _process_column_initial(np.array(["xyz", "abc", "xyz"], dtype=np.unicode_), None, 'nominal_prevalence', None)
assert(len(c) == 2)
assert(c["xyz"] == 1)
assert(c["abc"] == 2)
    assert(np.array_equal(encoded, np.array([c["xyz"], c["abc"], c["xyz"]], dtype=np.int64)))
from flask import Flask,request,json,jsonify #pip install Flask && pip install -v scikit-learn==0.23.1
import numpy as np #pip install numpy
import pandas as pd
app = Flask(__name__)
@app.route('/recommend/',methods=['GET', 'POST'])
def recommend():
data = request.get_json()
cat = []
ints = []
ideas = []
for i in data['cat']:
cat.append(i)
for i in data['ints']:
ints.append(i)
for i in data['ideas']:
ideas.append(i)
    ideas1 = []
    x = 0
    for i in cat:
        if i == ideas[x]:
            ideas1.append(1)
        else:
            ideas1.append(0)
        x += 1
    cats1 = []
    x = 0
    for i in ideas:
        if i == cat[x]:
            cats1.append(1)
        else:
            cats1.append(0)
        x += 1
    cats1 = np.array(cats1)
import pysplishsplash
import gym
import pickle
import numpy as np
import torch
import argparse
import os,sys
import time
from scipy.ndimage import gaussian_filter,gaussian_filter1d
from scipy.stats import linregress
from scipy.spatial.transform import Rotation as R
import math
import matplotlib.pyplot as plt
from tqdm import tqdm,trange
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def convert_state_to_torch(state):
features = torch.FloatTensor(np.array([state[0]]).reshape(1, -1)).to(device)
particles = torch.FloatTensor(state[1].reshape(1,*state[1].shape)).to(device)
return features,particles
def evalstuff(state,action,td3):
features,particles = convert_state_to_torch(state)
features[-1] = 0
#td3.actor.eval()
#td3.critic.eval()
#print("likestuff",td3.actor(features,particles),td3.critic.Q1(features,particles, td3.actor(features,particles)))
#print("action",action)
q_val = policy.eval_q(state,action)
#print(state[0],action,q_val)
#print("chosen",q_val)
#print("zero",policy.eval_q(state,[0]))
#print("special",policy.eval_q(state,[1]))
#print("one",policy.eval_q(state,[1,1,1]))
return (q_val[0][0]+q_val[1][0])/2
def train(state,td3):
batch_size = 32
all_feat = []
all_part = []
for _ in range(batch_size):
f,p = convert_state_to_torch(state)
all_feat.append(f)
all_part.append(p)
features = torch.cat(all_feat,0)
particles = torch.cat(all_part,0)
td3._actor_learn(features,particles)
def plot_q_compare(rew_lists,q_lists,discount,path,show=False):
maxi = max(len(x) for x in rew_lists)
print([len(x) for x in rew_lists])
emp_rewards = [0 for _ in range(len(rew_lists))]
emp_avg = []
q_avg = []
    for i in range(maxi-1, -1, -1):
        emp_pot = []
        q_pot = []
        for j in range(len(rew_lists)):
            if len(rew_lists[j]) > i:
                emp_rewards[j] = emp_rewards[j]*discount + rew_lists[j][i]
                emp_pot.append(emp_rewards[j])
                q_pot.append(q_lists[j][i])
        # average across runs that are still active at step i
        emp_avg.append(np.mean(emp_pot))
        q_avg.append(np.mean(q_pot))
emp_avg.reverse()
q_avg.reverse()
plt.plot(emp_avg,label="empirical Q value (discounted)")
plt.plot(q_avg,label="TD3 computed Q value")
plt.xlabel("time step")
plt.ylabel("Q-value")
plt.legend()
plt.savefig(path)
if show:
plt.show()
plt.cla()
def running_mean(x, N):
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / float(N)
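# Worked example (illustrative): with a window of N=3,
#     running_mean(np.array([1., 2., 3., 4.]), 3)  ->  array([2., 3.])
# i.e. the means of [1, 2, 3] and [2, 3, 4].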
def smooth_compare(x_ax1,x_ax2,vals1,vals2,xlabel,ylabel,legend_vals,path,show=False,sigma=5):
fig = plt.figure(figsize=(10,4))
lims = (max(min(x_ax1),min(x_ax2)),min(max(x_ax1),max(x_ax2)))
    # sigma taken from the keyword argument (assumed; the original call was cut off here)
    v1 = gaussian_filter1d(np.array(vals1, dtype=np.float), sigma)
import os
import time
import numpy as np
os.system(r"printf '\033[2J'")
field = np.full((23, 80), " ")
"""
Library of utilities that can be used for plotting
"""
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
import cubedsphere.const as c
from .utils import _flatten_ds
def overplot_wind(ds_reg, U, V, stepsize=1):
"""
Quick and dirty function for overplotting wind of a regridded dataset
Parameters
----------
ds_reg: xarray DataSet
regridded dataset
stepsize: integer
specify the stepsize for which wind arrows should be plotted
"""
ax = plt.gca()
y, x = ds_reg["lat"].values, ds_reg["lon"].values
xmesh, ymesh = np.meshgrid(x, y)
ax.quiver(xmesh[::stepsize, ::stepsize], ymesh[::stepsize, ::stepsize], U[::stepsize, ::stepsize],
V[::stepsize, ::stepsize])
def plotCS(dr, ds, mask_size=None, **kwargs):
"""
A quick way to plot cubed-sphere data of resolution 6*N*N.
    Wrapping _plot_cs_raw to work with xarray objects
Parameters
----------
dr: xarray DataArray
The dimensions must be (y,x)
ds: xarray DataSet (the parent DataSet of dr)
Must contain XC, YC as coordinate variables.
mask_size: None or int
        The overlap size of individual tiles. If None is chosen, one is likely to experience plotting issues
**kwargs
        Other keyword arguments such as cmap will be passed to _plot_cs_raw() and subsequently to plt.pcolormesh()
Returns
-------
ph: list
List of mappabales
"""
# must convert xarray objects to raw numpy arrays
# otherwise numpy masking functions won't work
x_dim, y_dim = c.lon, c.lat
if len(ds[c.lon].shape) > 2:
if ds[c.lon].shape[-1] == dr.shape[-1]:
x_dim = c.lon
x = _flatten_ds(ds[x_dim]).values
data = _flatten_ds(dr).values
elif ds[c.lon_b].shape[-1] == dr.shape[-1]:
x_dim = c.lon_b
x = _flatten_ds(ds[x_dim]).values
data = _flatten_ds(dr).values
if ds[c.lat].shape[-2] == dr.shape[-2]:
y_dim = c.lat
y = _flatten_ds(ds[y_dim]).values
elif ds[c.lat_b].shape[-2] == dr.shape[-2]:
y_dim = c.lat_b
y = _flatten_ds(ds[y_dim]).values
else:
x = ds[x_dim].values
y = ds[y_dim].values
data = dr.values
#assert dr.shape == ds[
# x_dim].shape, f"shape mismatch. shape of data: {dr.shape}, shape of coordinates: {ds[x_dim].shape}"
#assert dr.shape == ds[
# y_dim].shape, f"shape mismatch. shape of data: {dr.shape}, shape of coordinates: {ds[y_dim].shape}"
if mask_size is not None:
try:
mask = np.abs(x - 180) < mask_size
data = np.ma.masked_where(mask, data)
except IndexError:
print("caution: No masking possible!")
return _plot_cs_raw(x, y, data, **kwargs)
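# Sketch of typical use (illustrative, not part of the original module; assumes
# a cubed-sphere DataArray `da` whose parent Dataset `ds` carries XC/YC):
#     ph = plotCS(da, ds, mask_size=5, cmap="viridis")
#     plt.colorbar(ph[0])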
def _plot_cs_raw(x, y, data, projection=None, vmin=None, vmax=None, **kwargs):
"""
Plots 2D scalar fields on the MITgcm cubed sphere grid with pcolormesh.
Adapted from MITgcmutils (https://github.com/MITgcm/MITgcm/tree/master/utils/python/MITgcmutils/MITgcmutils/cs)
Parameters
----------
x: array_like
'xg', that is, x coordinate of the points one half grid cell to the
left and bottom, that is vorticity points for tracers, etc.
y: array_like
'yg', that is, y coordinate of same points
data: array_like
scalar field at tracer points
projection: Basemap instance, optional
used to transform if present.
        Unfortunately, cylindrical and conic maps are limited to
the [-180 180] range.
projection = 'sphere' results in a 3D visualization on the sphere
without any specific projection. Good for debugging.
Returns
-------
ph: list
List of mappabales
"""
# pcol first divides the 2D cs-field(6*n,n) into six faces. Then for
    # each face, an extra row and column is added from the neighboring faces in
# order to fool pcolor into drawing the entire field and not just
# (n-1,m-1) data points. There are two corner points that have no explicit
# coordinates so that they have to be found by
# interpolation/averaging. Then each face is divided into 4 tiles,
# assuming cs-geometry, and each tile is plotted individually in
    # order to avoid problems due to ambiguous longitude values (the jump
# between -180 and 180, or 360 and 0 degrees). As long as the poles
# are at the centers of the north and south faces and the first tile is
# symmetric about its center this should work.
# get the figure handle
fig = plt.gcf()
mapit = 0
if projection != None:
mp = projection
if mp == 'sphere':
mapit = -1
else:
mapit = 1
# convert to [-180 180[ representation
x = np.where(x > 180, x - 360., x)
ny, nx = data.shape
# determine range for color range
cax = [data.min(), data.max()]
if cax[1] - cax[0] == 0: cax = [cax[0] - 1, cax[1] + 1]
if vmin != None: cax[0] = vmin
if vmax != None: cax[1] = vmax
if mapit == -1:
# set up 3D plot
if len(fig.axes) > 0:
# if present, remove and replace the last axis of fig
geom = fig.axes[-1].get_geometry()
plt.delaxes(fig.axes[-1])
else:
# otherwise use full figure
geom = ((1, 1, 1))
ax = fig.add_subplot(geom[0], geom[1], geom[2], projection='3d',
facecolor='None')
# define color range
tmp = data - data.min()
N = tmp / tmp.max()
# use this colormap
colmap = cm.jet
colmap.set_bad('w', 1.0)
mycolmap = colmap(N) # cm.jet(N)
ph = np.array([])
jc = x.shape[0] // 2
xxf = np.empty((jc + 1, jc + 1, 4))
yyf = xxf
ffld = np.empty((jc, jc, 4))
xff = []
yff = []
fldf = []
for k in range(0, 6):
ix = np.arange(0, ny) + k * ny
xff.append(x[0:ny, ix])
yff.append(y[0:ny, ix])
fldf.append(data[0:ny, ix])
# find the missing corners by interpolation (one in the North Atlantic)
xfodd = (xff[0][-1, 0] + xff[2][-1, 0] + xff[4][-1, 0]) / 3.
yfodd = (yff[0][-1, 0] + yff[2][-1, 0] + yff[4][-1, 0]) / 3.
# and one south of Australia
xfeven = (xff[1][0, -1] + xff[3][0, -1] + xff[5][0, -1]) / 3.
yfeven = (yff[1][0, -1] + yff[3][0, -1] + yff[5][0, -1]) / 3.
# loop over tiles
for k in range(0, 6):
kodd = 2 * (k // 2)
kodd2 = kodd
if kodd == 4: kodd2 = kodd - 6
keven = 2 * (k // 2)
keven2 = keven
if keven == 4: keven2 = keven - 6
fld = fldf[k]
if np.mod(k + 1, 2):
xf = np.vstack([np.column_stack([xff[k], xff[1 + kodd][:, 0]]),
np.flipud(np.append(xff[2 + kodd2][:, 0], xfodd))])
yf = np.vstack([np.column_stack([yff[k], yff[1 + kodd][:, 0]]),
np.flipud(np.append(yff[2 + kodd2][:, 0], yfodd))])
else:
xf = np.column_stack([np.vstack([xff[k], xff[2 + keven2][0, :]]),
np.flipud(np.append(xff[3 + keven2][0, :],
xfeven))])
yf = np.column_stack([np.vstack([yff[k], yff[2 + keven2][0, :]]),
np.flipud(np.append(yff[3 + keven2][0, :],
yfeven))])
if mapit == -1:
ix = np.arange(0, ny) + k * ny
# no projection at all (projection argument is 'sphere'),
# just convert to cartesian coordinates and plot a 3D sphere
deg2rad = np.pi / 180.
xcart, ycart, zcart = _sph2cart(xf * deg2rad, yf * deg2rad)
ax.plot_surface(xcart, ycart, zcart, rstride=1, cstride=1,
facecolors=mycolmap[0:ny, ix],
linewidth=2, shade=False)
ph = np.append(ph, ax)
else:
# divide all faces into 4 because potential problems arise at
# the centers
for kf in range(0, 4):
if kf == 0:
i0, i1, j0, j1 = 0, jc + 1, 0, jc + 1
elif kf == 1:
i0, i1, j0, j1 = 0, jc + 1, jc, 2 * jc + 1
elif kf == 2:
i0, i1, j0, j1 = jc, 2 * jc + 1, 0, jc + 1
elif kf == 3:
i0, i1, j0, j1 = jc, 2 * jc + 1, jc, 2 * jc + 1
xx = xf[i0:i1, j0:j1]
yy = yf[i0:i1, j0:j1]
ff = fld[i0:i1 - 1, j0:j1 - 1]
if np.median(xx) < 0:
xx = np.where(xx >= 180, xx - 360., xx)
else:
xx = np.where(xx <= -180, xx + 360., xx)
# if provided use projection
if mapit == 1: xx, yy = mp(xx, yy)
# now finally plot 4x6 tiles
ph = np.append(ph, plt.pcolormesh(xx, yy, ff,
vmin=cax[0], vmax=cax[1],
**kwargs))
if mapit == -1:
# ax.axis('image')
ax.set_axis_off()
# ax.set_visible=False
# add a reasonable colormap
m = cm.ScalarMappable(cmap=colmap)
m.set_array(data)
plt.colorbar(m)
elif mapit == 0:
ax = fig.axes[-1]
ax.axis('image')
plt.grid('on')
return ph
def _sph2cart(azim_sph_coord, elev_sph_coord):
    # Unit-sphere spherical-to-Cartesian transform (reconstructed here; the
    # caller only needs a unit radius for its 3D sphere visualization).
    r = np.cos(elev_sph_coord)
    xcart = r * np.cos(azim_sph_coord)
    ycart = r * np.sin(azim_sph_coord)
    zcart = np.sin(elev_sph_coord)
    return xcart, ycart, zcart
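# Quick check (illustrative): the pole at elevation pi/2 maps to (0, 0, 1)
# up to floating-point rounding:
#     _sph2cart(0.0, np.pi/2)  ->  (~6.1e-17, 0.0, 1.0)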
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This module is for test output.py saving integrated powder
x-ray diffraction intensities into different file formats.
(Output into different file formats, .chi, .dat, .xye, gsas)
Added a test to check the GSAS file reader and file writer
"""
from __future__ import absolute_import, division, print_function
import six
import os
import math
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import skxray.io.save_powder_output as output
from skxray.io.save_powder_output import gsas_writer
from skxray.io.gsas_file_reader import gsas_reader
def test_save_output():
filename = "function_values"
x = np.arange(0, 100, 1)
y = np.exp(x)
y1 = y*math.erf(0.5)
output.save_output(x, y, filename, q_or_2theta="Q", err=None,
dir_path=None)
output.save_output(x, y, filename, q_or_2theta="2theta", ext=".dat",
err=None, dir_path=None)
output.save_output(x, y, filename, q_or_2theta="2theta", ext=".xye",
err=y1, dir_path=None)
Data_chi = np.loadtxt("function_values.chi", skiprows=7)
Data_dat = np.loadtxt("function_values.dat", skiprows=7)
Data_xye = np.loadtxt("function_values.xye", skiprows=7)
assert_array_almost_equal(x, Data_chi[:, 0])
assert_array_almost_equal(y, Data_chi[:, 1])
assert_array_almost_equal(x, Data_dat[:, 0])
assert_array_almost_equal(y, Data_dat[:, 1])
assert_array_almost_equal(x, Data_xye[:, 0])
    assert_array_almost_equal(y, Data_xye[:, 1])
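    # Sketch (assumption, not in the original test: save_output writes the
    # err=y1 values as the third column of the .xye file):
    #     assert_array_almost_equal(y1, Data_xye[:, 2])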
from __future__ import division
import warnings
from pycircstat import CI
from pycircstat.iterators import index_bootstrap
import numpy as np
from scipy import stats
import pandas as pd
class BaseRegressor(object):
"""
Basic regressor object. Mother class to all other regressors.
Regressors support indexing which is passed to the coefficients.
Regressors also support calling. In this case the prediction function is called.
"""
def __init__(self):
self._coef = None
def istrained(self):
"""
        Returns whether the regressor is trained or not.
:return: True if trained
"""
return self._coef is not None
def train(self, *args, **kwargs):
raise NotImplementedError(u"{0:s}.train not implemented".format(self.__class__.__name__))
def test(self, *args, **kwargs):
raise NotImplementedError(u"{0:s}.test not implemented".format(self.__class__.__name__))
def loss(self, x, y, lossfunc, ci=None, bootstrap_iter=1000):
"""
Computes loss function between the predictions f(x) and the true y.
:param x: inputs in radians. If multidimensional, each row must
be a specimen and each column a feature.
:param y: desired outputs in radians. If multidimensional, each
row must be a specimen and each column a feature.
:param lossfunc: loss function, must take an array of input and outputs and compute the loss.
:param ci: confidence interval in [0,1]. If not None, bootstrapping is performed.
        :param bootstrap_iter: number of bootstrap iterations if ci is not None
:return: loss as computed by the loss function.
"""
if ci is not None:
yhat = self.predict(x)
l = [lossfunc(y[idx], yhat[idx]) for idx in index_bootstrap(x.shape[0], bootstrap_iter)]
mu = np.mean(l)
q = 1 - ci
            return mu, CI(np.percentile(l, q / 2. * 100), np.percentile(l, (1 - q / 2.) * 100))
return lossfunc(y, self.predict(x))
def predict(self, *args, **kwargs):
raise NotImplementedError(u"{0:s}.predict not implemented".format(self.__class__.__name__))
def __getitem__(self, item):
return self._coef.__getitem__(item)
def __setitem__(self, key, value):
        return self._coef.__setitem__(key, value)
def __call__(self, *args, **kwargs):
assert self.istrained(), "Regressor must be trained first."
return self.predict(*args, **kwargs)
class CL1stOrderRegression(BaseRegressor):
"""
Implements a circular linear regression model of the form
.. math::
x = m + a \\cos(\\alpha - \\alpha_0)
The actual model is equivalently implemented as
.. math::
x = c_1 \\cos(\\alpha) + c_2 \\sin(\\alpha) + m
References: [Jammalamadaka2001]_
"""
def __init__(self):
super(CL1stOrderRegression, self).__init__()
def train(self, alpha, x):
"""
Estimates the regression coefficients. Only works for 1D data.
:param alpha: independent variable, angles in radians
:param x: dependent variable
"""
assert alpha.shape == x.shape, "x and alpha need to have the same shape"
assert len(alpha.shape) == 1, "regression only implemented for 1D data"
assert len(x.shape) == 1, "regression only implemented for 1D data"
X = np.c_[np.cos(alpha), np.sin(alpha), np.ones_like(alpha)]
c = np.dot(np.linalg.pinv(X), x)
self._coef = c
def predict(self, alpha):
"""
Predicts linear values from the angles.
:param alpha: inputs, angles in radians
:return: predictions
"""
X = np.c_[np.cos(alpha), np.sin(alpha), np.ones_like(alpha)]
return np.dot(X, self._coef)
def test(self, alpha, x):
"""
Tests whether alpha and x are significantly correlated.
The test assumes that x is normally distributed. The test
function uses a Shapiro-Wilk test to test this assumption.
:param alpha: independent variable, angles in radians
:param x: dependent variable
:return: test results of Shapiro-Wilk and Liddell-Ord test
:rtype: pandas.DataFrame
References: [Jammalamadaka2001]_
"""
w, psw = stats.shapiro(x)
if psw < 0.05:
warnings.warn("This test requires Gaussian distributed x")
rxc, rxs, rcs = np.corrcoef(x, np.cos(alpha))[0,1], np.corrcoef(x, np.sin(alpha))[0,1], \
np.corrcoef(np.cos(alpha), np.sin(alpha))[0,1]
n = len(alpha)
r2 = (rxc**2 + rxs**2 - 2*rxc*rxs*rcs)/(1 - rcs**2)
f = (n-3)*r2/(1-r2)
p = stats.f.sf(f, 2, n-3)
df = pd.DataFrame(dict(
test = ['Shapiro-Wilk','Liddell-Ord'],
statistics = [w, f],
p = [psw, p],
dof = [None, (2, n-3)]
)).set_index('test')
return df
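# Sketch of typical use (synthetic data; illustrative only, not part of the
# original module):
#     alpha = np.random.uniform(0, 2*np.pi, 200)
#     x = 1.0 + 0.5*np.cos(alpha - 0.3) + 0.1*np.random.randn(200)
#     reg = CL1stOrderRegression()
#     reg.train(alpha, x)
#     xhat = reg(alpha)  # __call__ dispatches to predict
#     mse = reg.loss(alpha, x, lambda y, yhat: np.mean((y - yhat)**2))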
class CCTrigonometricPolynomialRegression(BaseRegressor):
"""
Implements a circular circular regression model of the form
.. math::
\\cos(\\beta) = a_0 + \\sum_{k=1}^d a_k \\cos(k\\alpha) + b_k \\sin(k\\alpha)
\\sin(\\beta) = c_0 + \\sum_{k=1}^d c_k \\cos(k\\alpha) + d_k \\sin(k\\alpha)
The angles :math:`\\beta` are estimated via :math:`\\hat\\beta = atan2(\\sin(\\beta), \\cos(\\beta))`
:param degree: degree d of the trigonometric polynomials
References: [Jammalamadaka2001]_
"""
def __init__(self, degree=3):
super(CCTrigonometricPolynomialRegression, self).__init__()
self.degree = degree
def train(self, alpha, beta):
"""
Estimates the regression coefficients. Only works for 1D data.
:param alpha: independent variable, angles in radians
:param beta: dependent variable, angles in radians
"""
        X = np.vstack([np.ones_like(alpha)]
                      + [np.cos(alpha*k) for k in np.arange(1., self.degree+1)]
                      + [np.sin(alpha*k) for k in np.arange(1., self.degree+1)]).T
        # Least-squares fit of cos(beta) and sin(beta) against the
        # trigonometric design matrix, as in the class docstring.
        self._coef = np.dot(np.linalg.pinv(X), np.c_[np.cos(beta), np.sin(beta)])
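    def predict(self, alpha):
        """
        Prediction sketch reconstructed from the class docstring (the original
        method is truncated in this copy): angles are recovered as atan2 of
        the fitted sine and cosine components.
        """
        X = np.vstack([np.ones_like(alpha)]
                      + [np.cos(alpha*k) for k in np.arange(1., self.degree+1)]
                      + [np.sin(alpha*k) for k in np.arange(1., self.degree+1)]).T
        preds = np.dot(X, self._coef)
        return np.arctan2(preds[:, 1], preds[:, 0])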
"""
wbutils.py
Copyright (c) 2022 Sony Group Corporation
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
"""
import numpy as np
def polarAWB_achromatic(imean, weight):
pixels_r = np.copy(imean[..., 0])
    pixels_g = np.copy(imean[..., 1])
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
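# Usage sketch (illustrative; valid once the module below has populated
# `space_groups`):
#     sg = space_groups['P 21 21 21']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls holds one equivalent reflection per symmetry operation,
#     # phases the factors to apply to the structure factor of (1, 2, 3).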
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
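# For the face-centred group F m m 2 each point operation appears four
# times, once per lattice point: (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and
# (1/2,1/2,0).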
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
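# F d d 2 combines the same four F lattice points with the quarter-cell
# diamond-glide translation (1/4, 1/4, 1/4), hence the denominators of 4
# in the block above.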
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
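# Body-centred groups (I ...) repeat each operation once more with
# (1/2, 1/2, 1/2) added, as in I m m 2 above.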
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
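# A minimal sketch (not part of the generated table) of how one of the
# (rot, trans_num, trans_den) tuples can be applied to a fractional
# coordinate. The helper name is illustrative and assumes only the N
# module imported at the top of this file; it is not part of the
# SpaceGroup API defined earlier.
def _apply_symop(rot, trans_num, trans_den, xyz):
    # x' = rot.xyz + t with t = trans_num/trans_den taken elementwise;
    # callers may want to reduce the result modulo 1.
    return N.dot(rot, xyz) + trans_num / (1.0 * trans_den)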
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
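# P m m m opens the centrosymmetric (mmm) groups: eight operations, the
# four rotations of 222 plus their products with the inversion (the -1
# diagonal matrix above). The primitive mmm groups up to P n m a (62)
# follow the same eight-operation pattern with varying glide/screw
# translations.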
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
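# The ':2' suffix marks the origin-choice-2 setting of the International
# Tables. In these blocks the inversion-derived operations carry the
# negated translations of their counterparts (e.g. (0,-1/2,-1/2) above),
# again stored unreduced.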
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
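# The centred mmm groups 63-68 list sixteen operations: the eight mmm
# point operations followed by their copies shifted by the centring
# vector (here (1/2, 1/2, 0)).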
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
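# Every group is registered under both its International Tables number
# and its Hermann-Mauguin symbol, so e.g. space_groups[62] and
# space_groups['P n m a'] refer to the same SpaceGroup instance.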
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
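# Space group 70: F d d d :2 (origin choice 2)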
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
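# Space group 71: I m m m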
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
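# Space group 72: I b a m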
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
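# Space group 73: I b c a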
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
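# Space group 74: I m m a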
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
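# Space group 75: P 4 (first of the tetragonal groups, Nos. 75-142)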
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
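# --- Illustrative sketch (not part of the generated table) -----------------
# Each (rot, trans_num, trans_den) triple encodes one symmetry operation on
# fractional coordinates: the 3x3 integer rotation `rot`, followed by the
# translation trans_num/trans_den (element-wise). The helper below is a
# hypothetical usage example, assuming `N` is numpy-compatible; it applies
# one operation to a fractional point and wraps the result into [0, 1).
def _apply_symop(op, point):
    rot, trans_num, trans_den = op
    frac = N.dot(rot, point) + N.array(trans_num, float) / N.array(trans_den, float)
    return frac % 1.0
# For example, the fourth operation of 'P 4' above maps (x, y, z) to
# (-x, -y, z), so the point (0.1, 0.2, 0.3) goes to (0.9, 0.8, 0.3).
# ----------------------------------------------------------------------------
# Space group 76: P 41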
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
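# Space group 77: P 42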
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
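# Space group 78: P 43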
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
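# Space group 79: I 4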
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
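# Hypothetical follow-up to _apply_symop above: the orbit (set of equivalent
# positions) of a point under 'I 4'. That the SpaceGroup instance keeps its
# operation list in a `transformations` attribute is an assumption, not
# guaranteed by this module, so the example is left commented out.
# orbit = [_apply_symop(op, N.array([0.1, 0.2, 0.3]))
#          for op in space_groups['I 4'].transformations]
# Space group 80: I 41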
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
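# Space group 81: P -4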
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
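# Space group 82: I -4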
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
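# Space group 83: P 4/m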
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
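# Space group 84: P 42/m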
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
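# Space group 85: P 4/n :2 (origin choice 2)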
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
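# Space group 86: P 42/n :2 (origin choice 2)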
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
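# Space group 87: I 4/m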
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
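# Space group 88: I 41/a :2 (origin choice 2)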
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
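# Space group 89: P 4 2 2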
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
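# Space group 90: P 4 21 2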
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
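# Space group 91: P 41 2 2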
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
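# Space group 92: P 41 21 2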
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
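# Space group 93: P 42 2 2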
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
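# Space group 94: P 42 21 2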
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
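# Space group 95: P 43 2 2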
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
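# Space group 96: P 43 21 2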
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
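# Space group 97: I 4 2 2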
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
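# Space group 98: I 41 2 2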
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
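# The 9-element arrays above are reshaped row-major, so
# N.array([0,-1,0,1,0,0,0,0,1]) becomes the 3x3 matrix
# ((0,-1,0),(1,0,0),(0,0,1)), i.e. the fourfold rotation about z that
# maps (x, y, z) to (-y, x, z).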
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
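# The body-centred groups store their centering-derived operations
# without reduction modulo the unit cell, e.g. trans_num/trans_den =
# (1, 1/2, 5/4) above is (1/2, 0, 3/4) plus the I-centering vector
# (1/2, 1/2, 1/2). A sketch of reducing such a translation back into
# [0, 1) exactly (the helper name and the use of the fractions module
# are assumptions, not part of this table):
from fractions import Fraction

def reduced_translation(trans_num, trans_den):
    # reduce each rational component modulo one lattice translation
    return [Fraction(int(n), int(d)) % 1
            for n, d in zip(trans_num, trans_den)]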
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
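# P -4 2 m and the -4 groups that follow contain the rotoinversion -4,
# which appears in the table as rotation parts with determinant -1,
# e.g. N.array([0,1,0,-1,0,0,0,0,-1]) above. A hedged sanity check,
# with the determinant written out explicitly so it does not depend on
# which linear-algebra module is available:
def det3(m):
    # determinant of a 3x3 integer matrix
    return (m[0][0]*(m[1][1]*m[2][2] - m[1][2]*m[2][1])
          - m[0][1]*(m[1][0]*m[2][2] - m[1][2]*m[2][0])
          + m[0][2]*(m[1][0]*m[2][1] - m[1][1]*m[2][0]))

assert det3(transformations[1][0]) == -1  # the -4 operation above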
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
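# Usage sketch: every group registered in this table can be looked up
# either by International Tables number or by Hermann-Mauguin symbol;
# both keys refer to the same SpaceGroup instance, e.g.
#   space_groups[123] is space_groups['P 4/m m m']   # -> True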
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
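# Space group 127: P 4/m b m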
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
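# Space group 128: P 4/m n c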
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
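# Space group 129: P 4/n m m (origin choice 2)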
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
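# Space group 130: P 4/n c c (origin choice 2)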
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
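# Space group 131: P 42/m m c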
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
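# Space group 132: P 42/m c m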
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
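# Space group 133: P 42/n b c (origin choice 2)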
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
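# Space group 134: P 42/n n m (origin choice 2)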
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
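# Space group 135: P 42/m b c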
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
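# Space group 136: P 42/m n m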
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
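# Space group 137: P 42/n m c (origin choice 2)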
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
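# Space group 138: P 42/n c m (origin choice 2)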
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
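# Space group 139: I 4/m m m (body-centred: the second half of the list
# repeats every operation with the translation (1/2, 1/2, 1/2) added)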
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
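# Space group 140: I 4/m c m (body-centred)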
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
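# Space group 141: I 41/a m d (origin choice 2, body-centred)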
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
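# Space group 142: I 41/a c d (origin choice 2, body-centred)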
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
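# Space group 143: P 3 (first of the trigonal groups; hexagonal axes)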
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
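
# Illustrative usage sketch, not part of the generated table: each stored
# operation is (rot, trans_num, trans_den) and maps a fractional coordinate x
# to N.dot(rot, x) + trans_num/trans_den.  `symmetry_images` is a
# hypothetical helper name; applied to the three operations of P 3 above it
# returns the three images of a general position.
def symmetry_images(ops, x):
    x = N.array(x, 'd')
    return [N.dot(rot, x) + num / (1.0 * den) for (rot, num, den) in ops]

# Space group 144: P 31 (screw axis: c-translations 0, 1/3, 2/3)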
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
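# (completion of the truncated 'P 32 2 1' entry, inferred by symmetry with
# the 'P 31 2 1' entry above: the 1/3 and 2/3 z-translations are swapped)
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg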
## main reference: https://github.com/fspaolo/captoolkit
## author: <NAME>;
## create: 2021.8.8;
import numpy as np
from utils.make_grid import make_grid
from utils.spatial_filter import spatial_filter
from scipy.ndimage import map_coordinates
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist
from scipy.interpolate import InterpolatedUnivariateSpline
### ------------------------------ ###
### 1-d interpolation ###
### ------------------------------ ###
def interp1d(x, y, xi, n = 1):
""" des: 1D interpolation (spline)
args:
x,y: coord_x and coord_y of the given points.
xi: the interpolation point.
n: degree of the smoothing spline. must be 1 <= n <= 5.
return:
interpolated yi value.
"""
    idx = np.argsort(x)
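    # Plausible completion (the source file is truncated here): sort the
    # support points, fit a degree-n spline, and evaluate it at xi.
    x, y = np.asarray(x)[idx], np.asarray(y)[idx]
    spl = InterpolatedUnivariateSpline(x, y, k=n)
    return spl(xi)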
import signatures.fisherVector as fv
import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import Normalizer
def teste():
import csv
import itertools
path = '/Users/romuere/Dropbox/Berkeley/workspace/pycbir 2/files/'
file_train_features = 'feature_vectors_cnn_training.csv'
file_test_features = 'feature_vectors_cnn_test.csv'
file_train_labels = 'labels_cnn_training.csv'
file_test_labels = 'labels_cnn_test.csv'
reader = csv.reader(open(path+file_train_features),delimiter=',')
x = list(reader)
    train_features = np.array(x)
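    # Plausible continuation (the source file is truncated here): load the
    # remaining CSVs symmetrically; the float cast of the feature arrays is
    # an assumption.
    train_features = train_features.astype(np.float64)
    reader = csv.reader(open(path + file_test_features), delimiter=',')
    test_features = np.array(list(reader)).astype(np.float64)
    reader = csv.reader(open(path + file_train_labels), delimiter=',')
    train_labels = np.array(list(reader)).ravel()
    reader = csv.reader(open(path + file_test_labels), delimiter=',')
    test_labels = np.array(list(reader)).ravel()
    return train_features, test_features, train_labels, test_labels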
from __future__ import absolute_import
import numpy as np
import scipy as sp
import logging
from scipy import stats
from fastlmm.pyplink.snpreader.Bed import Bed
#from fastlmm.association.gwas import LeaveOneChromosomeOut, LocoGwas, FastGwas, load_intersect
from fastlmm.association.LeaveOneChromosomeOut import LeaveOneChromosomeOut
from fastlmm.association.PrecomputeLocoPcs import load_intersect
from fastlmm.association.LocoGwas import FastGwas, LocoGwas
from fastlmm.util import run_fastlmmc
from fastlmm.inference import LMM
import unittest
import os.path
import time
from six.moves import range
currentFolder = os.path.dirname(os.path.realpath(__file__))
class TestGwas(unittest.TestCase):
@classmethod
def setUpClass(self):
#self.snpreader_bed = Bed(currentFolder + "../feature_selection/examples/toydata")
#self.pheno_fn = currentFolder + "../feature_selection/examples/toydata.phe"
self.meh = True
def test_loco(self):
"""
test leave one chromosome out iterator
"""
names = ["a", "b", "a", "c", "b", "c", "b"]
loco = LeaveOneChromosomeOut(names)
expect = [[[1,3,4,5,6],[0,2]],
[[0,2,3,5],[1,4,6]],
[[0,1,2,4,6],[3,5]]]
for i, (train_idx, test_idx) in enumerate(loco):
assert (expect[i][0] == train_idx).all()
assert (expect[i][1] == test_idx).all()
#def xtest_results_identical_with_fastlmmcX(self):
# """
# make sure gwas yields same results as fastlmmC
# """
# os.chdir(r"d:\data\carlk\cachebio\genetics\wtccc\data")
# bed_fn = "filtered/wtcfb"
# pheno_fn = r'pheno\cad.txt'
# logging.info("Loading Bed")
# snp_reader = Bed(bed_fn)
# import fastlmm.pyplink.snpset.PositionRange as PositionRange
# snp_set = PositionRange(0,201)
# logging.info("Intersecting and standardizing")
# G, y, _, _ = load_intersect(snp_reader, pheno_fn, snp_set)
# snp_pos = snp_reader.rs
# idx_sim = range(0, 200)
# idx_test = range(200,201)
# #snp_pos_sim = snp_pos[idx_sim]
# #snp_pos_test = snp_pos[idx_test]
# G_chr1, G_chr2 = G[:,idx_sim], G[:,idx_test]
# delta = 4.0
# REML = False
# #gwas_c = GwasTest(bed_fn, pheno_fn, snp_pos_sim, snp_pos_test, delta, REML=REML)
# #gwas_c.run_gwas()
# logging.info("Creating GwasPrototype")
# gwas = GwasPrototype(G_chr1, G_chr2, y, delta, REML=REML)
# logging.info("running GwasPrototype")
# gwas.run_gwas()
# logging.info("finished GwasPrototype")
# #gwas_f = FastGwas(G_chr1, G_chr2, y, delta, findh2=False)
# #gwas_f.run_gwas()
# sorted_snps = snp_pos_test[gwas.p_idx]
## make sure we get p-values right
#np.testing.assert_array_almost_equal(gwas.p_values, gwas_c.p_values, decimal=3)
#np.testing.assert_array_almost_equal(gwas.p_values, gwas_f.p_values, decimal=3)
#np.testing.assert_array_almost_equal(gwas.sorted_p_values, gwas_c.sorted_p_values, decimal=3)
#np.testing.assert_array_almost_equal(gwas.sorted_p_values, gwas_f.sorted_p_values, decimal=3)
def test_results_identical_with_fastlmmc(self):
"""
make sure gwas yields same results as fastlmmC
"""
currentFolder = os.path.dirname(os.path.realpath(__file__))
#prefix = r"C:\Users\chwidmer\Documents\Projects\sandbox\data\test"
#bed_fn = prefix + "/jax_gt.up.filt.M"
#dat_fn = prefix + "/jax_M_expression.1-18.dat"
#pheno_fn = prefix + "/jax_M_expression.19.phe.txt"
bed_fn = os.path.join(currentFolder, "../../feature_selection/examples/toydata")
pheno_fn = os.path.join(currentFolder, "../../feature_selection/examples/toydata.phe")
#prefix = "../../../tests\datasets\mouse"
#bed_fn = os.path.join(prefix, "alldata")
#pheno_fn = os.path.join(prefix, "pheno.txt")
snp_reader = Bed(bed_fn)
G, y, _, _ = load_intersect(snp_reader, pheno_fn)
snp_pos = snp_reader.rs
idx_sim = list(range(0, 5000))
idx_test = list(range(5000, 10000))
snp_pos_sim = snp_pos[idx_sim]
snp_pos_test = snp_pos[idx_test]
G_chr1, G_chr2 = G[:,idx_sim], G[:,idx_test]
delta = 1.0
###################################
# REML IN lmm.py is BROKEN!!
# we compare REML=False in lmm.py to fastlmmc
REML = False
gwas_c_reml = GwasTest(bed_fn, pheno_fn, snp_pos_sim, snp_pos_test, delta, REML=REML)
gwas_c_reml.run_gwas()
gwas = GwasPrototype(G_chr1, G_chr2, y, delta, REML=False)
gwas.run_gwas()
# check p-values in log-space!
np.testing.assert_array_almost_equal(np.log(gwas.p_values), np.log(gwas_c_reml.p_values), decimal=3)
if False:
import pylab
pylab.plot(np.log(gwas_c_reml.p_values), np.log(gwas_f.p_values_F), "x")
pylab.plot(list(range(-66,0,1)), list(range(-66,0,1)))
pylab.show()
# we compare lmm_cov.py to fastlmmc with REML=False
gwas_c = GwasTest(bed_fn, pheno_fn, snp_pos_sim, snp_pos_test, delta, REML=True)
gwas_c.run_gwas()
gwas_f = FastGwas(G_chr1, G_chr2, y, delta, findh2=False)
gwas_f.run_gwas()
np.testing.assert_array_almost_equal(np.log(gwas_c.p_values), np.log(gwas_f.p_values_F), decimal=2)
# additional testing code for the new wrapper functions
# Fix delta
from pysnptools.snpreader import Bed as BedSnpReader
from fastlmm.association.single_snp import single_snp
snpreader = BedSnpReader(bed_fn,count_A1=False)
frame = single_snp(test_snps=snpreader[:,idx_test], pheno=pheno_fn, G0=snpreader[:,idx_sim],h2=1.0/(delta+1.0),leave_out_one_chrom=False,count_A1=False)
sid_list,pvalue_list = frame['SNP'].values,frame['PValue'].values
np.testing.assert_allclose(gwas_f.sorted_p_values_F, pvalue_list, rtol=1e-10)
p_vals_by_genomic_pos = frame.sort_values(["Chr", "ChrPos"])["PValue"].tolist()
np.testing.assert_allclose(gwas_c_reml.p_values, p_vals_by_genomic_pos, rtol=.1)
np.testing.assert_allclose(gwas_c_reml.p_values, gwas_f.p_values_F, rtol=.1)
np.testing.assert_allclose(gwas_f.sorted_p_values_F, gwas_c_reml.sorted_p_values, rtol=.1)
# Search over delta
gwas_c_reml_search = GwasTest(bed_fn, pheno_fn, snp_pos_sim, snp_pos_test, delta=None, REML=True)
gwas_c_reml_search.run_gwas()
frame_search = single_snp(test_snps=snpreader[:,idx_test], pheno=pheno_fn, G0=snpreader[:,idx_sim],h2=None,leave_out_one_chrom=False,count_A1=False)
_,pvalue_list_search = frame_search['SNP'].values,frame_search['PValue'].values
p_vals_by_genomic_pos = frame_search.sort_values(["Chr", "ChrPos"])["PValue"].tolist()
np.testing.assert_allclose(gwas_c_reml_search.p_values, p_vals_by_genomic_pos, rtol=.001)
np.testing.assert_allclose(gwas_c_reml_search.sorted_p_values, pvalue_list_search, rtol=.001)
class GwasPrototype(object):
"""
class to perform genome-wide scan
"""
def __init__(self, train_snps, test_snps, phen, delta=None, cov=None, REML=False, train_pcs=None, mixing=0.0):
"""
set up GWAS object
"""
self.REML = REML
self.train_snps = train_snps
self.test_snps = test_snps
self.phen = phen
if delta is None:
self.delta=None
else:
self.delta = delta * train_snps.shape[1]
self.n_test = test_snps.shape[1]
self.n_ind = len(self.phen)
self.train_pcs = train_pcs
self.mixing = mixing
# add bias if no covariates are used
if cov is None:
self.cov = np.ones((self.n_ind, 1))
else:
self.cov = cov
self.n_cov = self.cov.shape[1]
self.lmm = None
self.res_null = None
self.res_alt = []
self.ll_null = None
self.ll_alt = np.zeros(self.n_test)
self.p_values = np.zeros(self.n_test)
self.sorted_p_values = np.zeros(self.n_test)
# merge covariates and test snps
self.X = np.hstack((self.cov, self.test_snps))
def precompute_UX(self, X):
'''
precompute UX for all snps to be tested
--------------------------------------------------------------------------
Input:
X : [N*D] 2-dimensional array of covariates
--------------------------------------------------------------------------
'''
logging.info("precomputing UX")
self.UX = self.lmm.U.T.dot(X)
self.k = self.lmm.S.shape[0]
self.N = self.lmm.X.shape[0]
if (self.k<self.N):
self.UUX = X - self.lmm.U.dot(self.UX)
logging.info("done.")
def train_null(self):
"""
train model under null hypothesis
"""
logging.info("training null model")
# use LMM
self.lmm = LMM()
self.lmm.setG(self.train_snps, self.train_pcs, a2=self.mixing)
self.lmm.setX(self.cov)
self.lmm.sety(self.phen)
logging.info("finding delta")
if self.delta is None:
result = self.lmm.findH2(REML=self.REML, minH2=0.00001 )
self.delta = 1.0/result['h2']-1.0
# UX = lmm_null.U.dot(test_snps)
self.res_null = self.lmm.nLLeval(delta=self.delta, REML=self.REML)
self.ll_null = -self.res_null["nLL"]
def set_current_UX(self, idx):
"""
set the current UX to pre-trained LMM
"""
si = idx + self.n_cov
self.lmm.X = np.hstack((self.X[:,0:self.n_cov], self.X[:,si:si+1]))
        self.lmm.UX = np.hstack((self.UX[:,0:self.n_cov], self.UX[:,si:si+1]))
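        # Plausible completion (the source file is truncated here), mirroring
        # precompute_UX: also update the low-rank remainder when k < N.
        if (self.k < self.N):
            self.lmm.UUX = np.hstack((self.UUX[:, 0:self.n_cov], self.UUX[:, si:si+1]))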
"""
Code based on random_transform in keras.processing.image from 2016.
(https://github.com/fchollet/keras)
Keras copyright:
All contributions by <NAME>: Copyright (c) 2015, <NAME>.
All contributions by Google: Copyright (c) 2015, Google, Inc.
All contributions by Microsoft: Copyright (c) 2017, Microsoft, Inc.
All other contributions: Copyright (c) 2015-2017, the respective contributors.
(All rights reserved by copyright holders of all contributions.)
Modified:
Copyright 2017, <NAME>
Copyright 2016, <NAME>
Copyright 2016, <NAME>
"""
import os
import numpy as np
import scipy.ndimage as ndi
import SimpleITK as sitk
"""
Apply data augmentation to all images in an N-dimensional stack. Assumes the
final two axes are spatial axes (not considering the channel axis).
Arguments are as defined for image_random_transform.
"""
def image_stack_random_transform(x, *args, y=None, channel_axis=1, **kwargs):
# Make sure these are numpy arrays.
x_arr = np.array(x)
if y is not None:
y_arr = np.array(y)
        x_shape = list(x_arr.shape)
        y_shape = list(y_arr.shape)
        x_shape[channel_axis] = None
        y_shape[channel_axis] = None
        if x_shape != y_shape:
            raise ValueError("Error: inputs x and y to "
                             "image_stack_random_transform must have the same "
                             "shape. Shapes are {} and {} for x, y."
                             "".format(x_arr.shape, y_arr.shape))
# Move channel axis to just before spatial axes.
std_channel_axis = x_arr.ndim-1-2
if channel_axis!=std_channel_axis:
x_arr = np.moveaxis(x_arr, source=channel_axis,
destination=std_channel_axis)
if y is not None:
            y_arr = np.moveaxis(y_arr, source=channel_axis,
destination=std_channel_axis)
# Compute indices to iterate over (everything except channel and spatial).
x_indices = np.ndindex(x_arr.shape[:-3])
if y is not None:
y_indices = np.ndindex(y_arr.shape[:-3])
# Random transform on each value.
x_out, y_out = None, None
if y is not None:
for idx_x, idx_y in zip(np.ndindex(x_arr.shape[:-3]),
np.ndindex(y_arr.shape[:-3])):
xt, yt = image_random_transform(x_arr[idx_x], y_arr[idx_y],
*args, channel_axis=0, **kwargs)
out_shape_x = x_arr.shape[:-2]+xt.shape[-2:]
out_shape_y = y_arr.shape[:-2]+xt.shape[-2:]
if x_out is None:
x_out = np.zeros(out_shape_x, dtype=np.float32)
if y_out is None:
y_out = np.zeros(out_shape_y, dtype=np.float32)
x_out[idx_x], y_out[idx_y] = xt, yt
else:
for idx_x in np.ndindex(x_arr.shape[:-3]):
xt = image_random_transform(x_arr[idx_x],
*args, channel_axis=0, **kwargs)
out_shape = x_arr.shape[:-2]+xt.shape[-2:]
if x_out is None:
x_out = np.zeros(out_shape, dtype=np.float32)
x_out[idx_x] = xt
# Move channel axis back to where it was.
if channel_axis!=std_channel_axis:
x_out = np.moveaxis(x_out, source=std_channel_axis,
destination=channel_axis)
if y is not None:
y_out = np.moveaxis(y_out, source=std_channel_axis,
destination=channel_axis)
if y is not None:
return x_out, y_out
return x_out
"""
Data augmentation for 2D images using random image transformations. This code
handles on input images alone or jointly on input images and their
corresponding output images (eg. input images and corresponding segmentation
masks).
x : A single 2D input image (ndim=3, channel and 2 spatial dims).
y : A single output image or mask.
rotation_range : Positive degree value, specifying the maximum amount to rotate
the image in any direction about its center.
width_shift_range : Float specifying the maximum distance by which to shift the
image horizontally, as a fraction of the image's width.
height_shift_range : Float specifying the maximum distance by which to shift
the image vertically, as a fraction of the image's height.
shear_range : Positive degree value, specifying the maximum horizontal sheer of
the image.
zoom_range : The maximum absolute deviation of the image scale from one.
(I.e. zoom_range of 0.2 allows zooming the image to scales within the
range [0.8, 1.2]).
intensity_shift_range : The maximum absolute value by which to shift image
intensities up or down.
fill_mode : Once an image is spatially transformed, fill any empty space with
the 'nearest', 'reflect', or 'constant' strategy. Mode 'nearest' fills the
space with the values of the nearest pixels; mode 'reflect' fills the space
with a mirror image of the image along its nearest border or corner;
'constant' fills it with the constant value defined in `cval`.
cval_x : The constant value with which to fill any empty space in a transformed
    input image when using `fill_mode='constant'`.
cval_y : The constant value with which to fill any empty space in a
    transformed target image when using `fill_mode='constant'`.
horizontal_flip : Boolean, whether to randomly flip images horizontally.
vertical_flip : Boolean, whether to randomly flip images vertically.
spline_warp : Boolean, whether to apply a b-spline nonlineary warp.
warp_sigma : Standard deviation of control point jitter in spline warp.
warp_grid_size : Integer s specifying an a grid with s by s control points.
crop_size : Tuple specifying the size of random crops taken of transformed
images. Crops are always taken from within the transformed image, with no
padding.
channel_axis : The axis in the input images that corresponds to the channel.
Remaining axes are the two spatial axes.
rng : A numpy random number generator.
"""
def image_random_transform(x, y=None, rotation_range=0., width_shift_range=0.,
height_shift_range=0., shear_range=0.,
zoom_range=0., intensity_shift_range=0.,
fill_mode='nearest', cval_x=0., cval_y=0.,
horizontal_flip=False, vertical_flip=False,
spline_warp=False, warp_sigma=0.1, warp_grid_size=3,
crop_size=None, channel_axis=0, n_warp_threads=None,
rng=None):
# Set random number generator
if rng is None:
rng = np.random.RandomState()
# x is a single image, so we don't have batch dimension
assert(x.ndim == 3)
if y is not None:
assert(y.ndim == 3)
img_row_index = 1
img_col_index = 2
img_channel_index = channel_axis
# Nonlinear spline warping
if spline_warp:
if n_warp_threads is None:
n_warp_threads = os.cpu_count()
warp_field = _gen_warp_field(shape=x.shape[-2:],
sigma=warp_sigma,
grid_size=warp_grid_size,
n_threads=n_warp_threads,
rng=rng)
x = _apply_warp(x, warp_field,
interpolator=sitk.sitkNearestNeighbor,
fill_mode=fill_mode,
cval=cval_x,
n_threads=n_warp_threads)
if y is not None:
y = np.round(_apply_warp(y, warp_field,
interpolator=sitk.sitkNearestNeighbor,
fill_mode=fill_mode,
cval=cval_y,
n_threads=n_warp_threads))
# use composition of homographies to generate final transform that needs
# to be applied
if np.isscalar(zoom_range):
zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise Exception('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = rng.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
if rotation_range:
theta = np.pi / 180 * rng.uniform(-rotation_range, rotation_range)
else:
theta = 0
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
if height_shift_range:
tx = rng.uniform(-height_shift_range, height_shift_range) \
* x.shape[img_row_index]
else:
tx = 0
if width_shift_range:
ty = rng.uniform(-width_shift_range, width_shift_range) \
* x.shape[img_col_index]
else:
ty = 0
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if shear_range:
shear = np.pi / 180 * rng.uniform(-shear_range, shear_range)
else:
shear = 0
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
    # The source is garbled/truncated here; the composition order of the two
    # outer factors (translation, zoom) is assumed.
    transform_matrix = np.dot(np.dot(np.dot(rotation_matrix, shear_matrix),
                                     translation_matrix), zoom_matrix)
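    # Plausible continuation (the source file is truncated here): offset the
    # composed homography to the image center and apply it channel-wise with
    # scipy.ndimage, as the Keras code this file is based on did. The
    # interpolation order and the handling of the remaining options
    # (intensity shift, flips, cropping) are assumptions.
    h, w = x.shape[img_row_index], x.shape[img_col_index]
    o_x, o_y = float(h) / 2 + 0.5, float(w) / 2 + 0.5
    offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
    reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
    transform_matrix = np.dot(np.dot(offset_matrix, transform_matrix),
                              reset_matrix)
    def _apply(img, cval):
        channels = [ndi.affine_transform(channel, transform_matrix[:2, :2],
                                         transform_matrix[:2, 2], order=0,
                                         mode=fill_mode, cval=cval)
                    for channel in np.rollaxis(img, img_channel_index)]
        return np.stack(channels, axis=img_channel_index)
    x = _apply(x, cval_x)
    if y is not None:
        return x, np.round(_apply(y, cval_y))
    return x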
import numpy as np
import matplotlib.pyplot as plt
from typing import Tuple
def rand_orientation_mat(
    k: int, p: int, xi: int, seed=None
) -> Tuple[np.ndarray, np.ndarray]:
"""Generates a random orientation matrix B*
Arguments:
k {int} -- grid dimension (how many dimensions grid has)
p {int} -- grid level (how many points in grid)
xi {int} -- elementary effect step size
Returns:
Bs {np.ndarray} -- Random orientation matrix B*
Ps {np.ndarray} -- Matrix indicating which vars change
"""
    if seed is not None:
np.random.seed(seed)
m = k + 1
delta = xi / (p - 1)
v = np.random.choice([-1, 1], size=k, p=[0.5, 0.5])
Ds = np.diag(v)
J = np.ones((m, k))
B = np.tril(J, k=-1)
sv = np.arange(0, (1 - delta) + 1 / (p - 1), 1 / (p - 1))
xs = np.random.choice(sv, size=k, p=np.ones((len(sv),)) / len(sv))[None, :]
ind = np.arange(k)
np.random.shuffle(ind)
Ps = np.eye(k)[:, ind]
Bs = (J[:, 0][:, None] * xs + (delta / 2) * ((2 * B - J) @ Ds + J)) @ Ps
return Bs, Ps
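# Illustrative check (added; not part of the original module): consecutive
# rows of a Morris orientation matrix B* differ in exactly one coordinate,
# by delta = xi / (p - 1).
def _check_orientation_matrix():
    Bs, Ps = rand_orientation_mat(k=3, p=4, xi=2, seed=0)
    assert Bs.shape == (4, 3) and Ps.shape == (3, 3)
    steps = np.abs(np.diff(Bs, axis=0))
    assert np.all(np.count_nonzero(steps > 1e-12, axis=1) == 1)
    assert np.allclose(steps.max(axis=1), 2 / (4 - 1))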
def sampling_matrix(
k: int, p: int, xi: int, r: int, seed=None
) -> Tuple[np.ndarray, np.ndarray]:
"""Generates a sampling matrix X consisting of r random
orientation matrices B*
Arguments:
k {int} -- grid dimension (how many dimensions grid has)
p {int} -- grid level (how many points in grid)
xi {int} -- elementary effect step size
r {int} -- number of elementary effects
Returns:
X {np.ndarray} -- r Random orientation matrices B* row concatenated
P {np.ndarray} -- Matrix indicating which vars change
"""
X = np.zeros(((k + 1) * r, k))
    P = np.zeros((k * r, k))
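    # Plausible completion (the source file is truncated here): stack r
    # independent orientation matrices. The seed is only forwarded to the
    # first call, since reseeding identically would duplicate trajectories.
    for i in range(r):
        Bs, Ps = rand_orientation_mat(k, p, xi, seed if i == 0 else None)
        X[i * (k + 1):(i + 1) * (k + 1), :] = Bs
        P[i * k:(i + 1) * k, :] = Ps
    return X, P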
import numpy as np
import torch
def getIndeices(shape,height,width,stride,dialation, offset):
H, W = shape
outHeight = (H - dialation*(height-1)-1) // stride +1
outWidth = (W - dialation*(width-1)-1) // stride +1
i0 = np.repeat(np.arange(height)*dialation, width)
    i1 = stride * np.repeat(np.arange(outHeight), outWidth)
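    # Plausible completion (the source file is truncated here): the standard
    # im2col-style index grids; treating `offset` as a uniform shift of the
    # indices is an assumption.
    j0 = np.tile(np.arange(width) * dialation, height)
    j1 = stride * np.tile(np.arange(outWidth), outHeight)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1) + offset
    j = j0.reshape(-1, 1) + j1.reshape(1, -1) + offset
    return i, j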
import matplotlib.pyplot as plt
# 导入数据集生成工具
import numpy as np
import seaborn as sns
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from tensorflow.keras import layers, Sequential, regularizers
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams["font.size"] = 16
plt.rcParams["font.family"] = ["STKaiti"]
plt.rcParams["axes.unicode_minus"] = False
OUTPUT_DIR = "output_dir"
N_EPOCHS = 500
def load_dataset():
    # number of sample points
    N_SAMPLES = 1000
    # test set fraction
    TEST_SIZE = None
    # randomly sample 1000 points from the moons distribution and split into train/test sets
X, y = make_moons(n_samples=N_SAMPLES, noise=0.25, random_state=100)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=TEST_SIZE, random_state=42
)
return X, y, X_train, X_test, y_train, y_test
def make_plot(
X,
y,
plot_name,
file_name,
XX=None,
YY=None,
preds=None,
dark=False,
output_dir=OUTPUT_DIR,
):
    # plot the dataset distribution; X holds the 2D coordinates, y the point labels
if dark:
plt.style.use("dark_background")
else:
sns.set_style("whitegrid")
axes = plt.gca()
axes.set_xlim([-2, 3])
axes.set_ylim([-1.5, 2])
axes.set(xlabel="$x_1$", ylabel="$x_2$")
plt.title(plot_name, fontsize=20, fontproperties="SimHei")
plt.subplots_adjust(left=0.20)
plt.subplots_adjust(right=0.80)
if XX is not None and YY is not None and preds is not None:
plt.contourf(
XX, YY, preds.reshape(XX.shape), 25, alpha=0.08, cmap=plt.cm.Spectral
)
plt.contour(
XX,
YY,
preds.reshape(XX.shape),
levels=[0.5],
cmap="Greys",
vmin=0,
vmax=0.6,
)
    # scatter plot, colored by label; m = markers
markers = ["o" if i == 1 else "s" for i in y.ravel()]
mscatter(
X[:, 0],
X[:, 1],
c=y.ravel(),
s=20,
cmap=plt.cm.Spectral,
edgecolors="none",
m=markers,
ax=axes,
)
    # save the figure as a vector graphic
plt.savefig(output_dir + "/" + file_name)
plt.close()
def mscatter(x, y, ax=None, m=None, **kw):
import matplotlib.markers as mmarkers
if not ax:
ax = plt.gca()
sc = ax.scatter(x, y, **kw)
if (m is not None) and (len(m) == len(x)):
paths = []
for marker in m:
if isinstance(marker, mmarkers.MarkerStyle):
marker_obj = marker
else:
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(marker_obj.get_transform())
paths.append(path)
sc.set_paths(paths)
return sc
def network_layers_influence(X_train, y_train):
    # build 5 networks with different numbers of layers
    for n in range(5):
        # create the model container
        model = Sequential()
        # create the first layer
        model.add(layers.Dense(8, input_dim=2, activation="relu"))
        # add n more layers, for n+2 layers in total
        for _ in range(n):
            model.add(layers.Dense(32, activation="relu"))
        # create the final layer
        model.add(layers.Dense(1, activation="sigmoid"))
        # compile and train the model
model.compile(
loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"]
)
model.fit(X_train, y_train, epochs=N_EPOCHS, verbose=1)
        # plot the decision boundary of networks with different depths
        # visualization x range: [-2, 3]
        xx = np.arange(-2, 3, 0.01)
        # visualization y range: [-1.5, 2]
        yy = np.arange(-1.5, 2, 0.01)
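        # Plausible completion (the source file is truncated here): evaluate
        # the trained model on the grid and plot the decision boundary; the
        # title and file name below are assumptions.
        XX, YY = np.meshgrid(xx, yy)
        preds = model.predict(np.c_[XX.ravel(), YY.ravel()])
        title = "Network depth: {} layers".format(n + 2)
        file_name = "network_depth_{}.png".format(n + 2)
        make_plot(X_train, y_train, title, file_name, XX, YY, preds)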
##########
# Code from: https://github.com/tobiasbaumann1/Adaptive_Mechanism_Design
##########
import copy
import logging
import numpy as np
import tensorflow as tf
logging.basicConfig(filename='Planning_Agent.log', level=logging.DEBUG, filemode='w')
from marltoolbox.algos.adaptive_mechanism_design.agent import Agent, convert_from_rllib_env_format
from tensorflow.python.ops import math_ops
def var_shape(x):
out = x.get_shape().as_list()
return out
def intprod(x):
return int(np.prod(x))
def numel(x):
return intprod(var_shape(x))
class Planning_Agent(Agent):
def __init__(self, env, underlying_agents, learning_rate=0.01,
gamma=0.95, max_reward_strength=None, cost_param=0, with_redistribution=False,
value_fn_variant='exact', n_units=None, weight_decay=0.0, convert_a_to_one_hot=False, mean_theta=0.0,
loss_mul_planner=1.0, std_theta=0.1, planner_clip_norm=0.5, normalize_planner=False,
add_state_grad=False, planner_momentum=0.9, use_adam_optimizer=True, use_softmax_hot=True,
square_cost=False, normalize_against_v=False, use_v_pl=False,
normalize_against_vp=False, normalize_vp_separated=False):
super().__init__(env, learning_rate, gamma)
self.underlying_agents = underlying_agents
self.log = []
self.max_reward_strength = max_reward_strength
n_players = len(underlying_agents)
self.with_redistribution = with_redistribution
self.value_fn_variant = value_fn_variant
self.convert_a_to_one_hot = convert_a_to_one_hot
self.env_name = env.NAME
self.env = env
self.loss_mul_planner = loss_mul_planner
with tf.variable_scope('Planner'):
self.s = tf.placeholder(tf.float32, [1, env.n_features], "state_pl")
self.a_players = tf.placeholder(tf.float32, [1, n_players], "player_actions")
self.convertion_to_one_hot(use_softmax_hot)
if value_fn_variant == 'exact':
if self.convert_a_to_one_hot:
self.p_players = tf.placeholder(tf.float32, [1, n_players, env.NUM_ACTIONS], "player_action_probs")
else:
self.p_players = tf.placeholder(tf.float32, [1, n_players], "player_action_probs")
self.a_plan = tf.placeholder(tf.float32, [2, 2], "conditional_planning_actions") # works only for matrix games
self.r_players = tf.placeholder(tf.float32, [1, n_players], "player_rewards")
if self.convert_a_to_one_hot:
self.inputs = tf.concat([self.s, self.a_players_one_hot_reshape], 1)
else:
self.inputs = tf.concat([self.s, self.a_players], 1)
if normalize_planner:
self.inputs = self.inputs-0.5
with tf.variable_scope('Policy_p'):
if self.convert_a_to_one_hot:
ma_action_space_dim = 2 * env.NUM_ACTIONS
else:
ma_action_space_dim = env.NUM_ACTIONS
if not isinstance(n_units, list):
units = [env.n_features + ma_action_space_dim, n_units, n_players]
else:
units = [env.n_features + ma_action_space_dim] + n_units + [n_players]
self.create_multi_layer_fc(units, mean_theta, std_theta)
if max_reward_strength is None:
self.action_layer = self.l1
else:
self.action_layer = tf.sigmoid(self.l1)
with tf.variable_scope('Vp'):
if max_reward_strength is not None:
self.vp = 2 * max_reward_strength * (self.action_layer - 0.5)
else:
self.vp = self.action_layer
with tf.variable_scope('V_total'):
if value_fn_variant == 'proxy':
self.v = 2 * self.a_players - 1
# if value_fn_variant == 'estimated':
if value_fn_variant == 'estimated' or value_fn_variant == 'exact':
if "CoinGame" in self.env_name:
self.v = tf.reduce_sum(self.r_players)
else:
self.v = tf.reduce_sum(self.r_players) - 1.9
# if value_fn_variant == 'exact':
# self.v = tf.placeholder(tf.float32, [1, n_players], "player_values")
with tf.variable_scope('cost_function'):
if value_fn_variant == 'estimated':
self.g_log_pi = tf.placeholder(tf.float32, [env.n_features, n_players], "player_gradients")
cost_list = []
for underlying_agent in underlying_agents:
# policy gradient theorem
idx = underlying_agent.agent_idx
if value_fn_variant == 'estimated':
if "CoinGame" in self.env_name: # or True:
self.g_Vp = self.g_log_pi[:, idx] * self.vp[:, idx]
self.g_V = self.g_log_pi[:, idx] * (self.v[:, idx]
if value_fn_variant == 'proxy'
else self.v)
else:
self.g_Vp = self.g_log_pi[0, idx] * self.vp[0, idx]
self.g_V = self.g_log_pi[0, idx] * (self.v[0, idx]
if value_fn_variant == 'proxy'
else self.v)
if value_fn_variant == 'exact':
act_idx = tf.cast(self.a_players[0, idx], tf.int32)
if self.convert_a_to_one_hot:
self.g_p = self.p_players[0, idx, act_idx] * (1 - self.p_players[0, idx, act_idx])
self.p_opp = self.p_players[0, 1 - idx, act_idx]
grad = tf.gradients(ys=self.vp[0, idx], xs=self.a_players)
if add_state_grad:
grad_s = tf.gradients(ys=self.vp[0, idx], xs=self.s)
self.g_Vp = self.g_p * grad[0][0, idx]
if add_state_grad:
self.g_Vp += self.g_p * tf.reduce_sum(grad_s)
else:
self.g_p = self.p_players[0, idx] * (1 - self.p_players[0, idx])
self.p_opp = self.p_players[0, 1 - idx]
grad = tf.gradients(ys=self.vp[0, idx], xs=self.a_players)
if add_state_grad:
grad_s = tf.gradients(ys=self.vp[0, idx], xs=self.s)
self.g_Vp = self.g_p * grad[0][0, idx]
if add_state_grad:
self.g_Vp += self.g_p * tf.reduce_sum(grad_s)
if "CoinGame" in self.env_name:
if add_state_grad:
self.g_Vp = self.g_Vp / (3*9+4)
self.g_V = self.g_p * tf.reduce_sum(self.v)
else:
if add_state_grad:
self.g_Vp = self.g_Vp / (5+1)
if not use_v_pl:
self.g_V = self.g_p * (self.p_opp * (2 * env.R - env.T - env.S)
+ (1 - self.p_opp) * (env.T + env.S - 2 * env.P))
else:
self.g_V = self.g_p * tf.reduce_sum(self.v)
cost_list.append(- underlying_agent.learning_rate * self.g_Vp * self.g_V)
if with_redistribution:
if square_cost:
self.extra_loss = cost_param * tf.norm(self.vp - tf.reduce_mean(self.vp)) * \
tf.norm(self.vp - tf.reduce_mean(self.vp))
else:
self.extra_loss = cost_param * tf.norm(self.vp - tf.reduce_mean(self.vp))
else:
if square_cost:
self.extra_loss = cost_param * tf.norm(self.vp) * tf.norm(self.vp)
else:
self.extra_loss = cost_param * tf.norm(self.vp)
if not normalize_vp_separated:
self.cost = tf.reduce_sum(tf.stack(cost_list))
else:
self.cost = tf.stack(cost_list, axis=0)
self.dynamic_scaling_vp(normalize_against_vp, max_reward_strength, normalize_vp_separated)
self.dynamic_scaling_v(normalize_against_v)
if planner_clip_norm is not None:
self.cost = tf.clip_by_norm(self.cost, planner_clip_norm, axes=None, name=None)
self.loss = (self.cost + self.extra_loss)
if weight_decay > 0.0:
self.loss += weight_decay * self.weights_norm
with tf.variable_scope('trainPlanningAgent'):
#AdamOptimizer
if use_adam_optimizer:
self.train_op = tf.train.AdamOptimizer(self.loss_mul_planner *learning_rate).minimize(self.loss,
var_list=tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope='Planner/Policy_p'))
else:
self.train_op = tf.train.MomentumOptimizer(self.loss_mul_planner *learning_rate,
momentum=planner_momentum).minimize(self.loss,
var_list=tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES,
scope='Planner/Policy_p'))
self.sess.run(tf.global_variables_initializer())
def convertion_to_one_hot(self, use_softmax_hot):
if "CoinGame" in self.env_name:
values = tf.stack([
-tf.math.abs(-self.a_players),
-tf.math.abs(1 - self.a_players),
-tf.math.abs(2 - self.a_players),
-tf.math.abs(3 - self.a_players),
], 2)
values = tf.where(tf.equal(values, -2), values+1, values)
values = tf.where(tf.equal(values, -3), values+2, values)
else:
values = tf.stack([
-tf.math.abs(-self.a_players),
-tf.math.abs(1 - self.a_players),
], 2)
if use_softmax_hot:
self.a_players_one_hot = tf.nn.softmax(values)
else:
self.a_players_one_hot = values + 1
self.a_players_one_hot_reshape = tf.reshape(self.a_players_one_hot, (1, -1))
def create_multi_layer_fc(self, units, mean_theta, std_theta):
print("units", units)
var_list = []
input_ = self.inputs
for i in range(len(units)):
with tf.variable_scope("planner_layer_{}".format(i)):
n_in = units[i]
n_out = units[i + 1]
print("i", i)
print("n_in", n_in)
print("n_out", n_out)
if i + 1 == len(units) - 1:
break
w_l1 = tf.Variable(tf.random_normal([n_in, n_out], mean=0.0, stddev=std_theta))
b_l1 = tf.Variable(tf.random_normal([n_out], mean=0.0, stddev=std_theta))
l1 = tf.nn.leaky_relu(tf.matmul(input_, w_l1) + b_l1)
var_list.extend([w_l1, b_l1])
input_ = l1
self.w_pi0 = tf.Variable(tf.random_normal([n_in, n_out], mean=0.0, stddev=std_theta))
self.b_pi0 = tf.Variable(tf.random_normal([n_out], mean=mean_theta, stddev=std_theta))
self.l1 = tf.matmul(input_, self.w_pi0) + self.b_pi0
var_list.extend([self.w_pi0, self.b_pi0])
self.parameters = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
weights_norm = math_ops.reduce_sum(self.parameters * self.parameters, None, keepdims=True)
self.weights_norm = tf.sqrt(tf.reduce_sum(weights_norm))
def dynamic_scaling_vp(self, normalize_against_vp, max_reward_strength, normalize_vp_separated):
if "CoinGame" in self.env_name:
init_v = 0.1
else:
init_v = 0.0
if not normalize_vp_separated:
self.mean_vp_np = init_v * normalize_against_vp
self.mean_vp_in = tf.placeholder(tf.float32, shape=(), name="mean_vp")
else:
temp_v = init_v * normalize_against_vp
self.mean_vp_np = [temp_v, temp_v]
self.mean_vp_in = tf.placeholder(tf.float32, shape=(2,), name="mean_vp")
if normalize_against_vp:
if not normalize_vp_separated:
null = 0.0
self.mean_vp_out = ((1 - (1 / normalize_against_vp)) * self.mean_vp_in +
tf.math.abs(tf.reduce_sum(self.vp)))
self.cost = tf.cond(tf.equal(self.mean_vp_out, null), lambda: null,
lambda: self.cost / (
self.mean_vp_out / normalize_against_vp * 10 / max_reward_strength))
else:
null = 0.0
self.mean_vp_out = ((1 - (1 / normalize_against_vp)) * self.mean_vp_in +
tf.math.abs(tf.reduce_sum(self.vp, axis=0)))
cost_list = []
cost_list.append(tf.cond(tf.equal(self.mean_vp_out[0], 0.0), lambda: null,
lambda: self.cost[0] / (
self.mean_vp_out[0] / normalize_against_vp * 10 / max_reward_strength)))
cost_list.append(tf.cond(tf.equal(self.mean_vp_out[1], 0.0), lambda: null,
lambda: self.cost[1] / (
self.mean_vp_out[1] / normalize_against_vp * 10 / max_reward_strength)))
self.cost = tf.reduce_sum(tf.stack(cost_list))
else:
self.mean_vp_out = self.mean_vp_in
def dynamic_scaling_v(self, normalize_against_v):
if "CoinGame" in self.env_name:
self.mean_v_np = 0.2 * normalize_against_v
else:
self.mean_v_np = 0.0
self.mean_v_in = tf.placeholder(tf.float32, name="mean_v")
if normalize_against_v:
self.mean_v_out = ((1-(1/normalize_against_v)) * self.mean_v_in +
tf.reduce_sum(tf.math.abs(self.v)))
self.cost = tf.cond(tf.equal(self.mean_v_out, 0.0), lambda:0.0, lambda:self.cost /
(self.mean_v_out/normalize_against_v))
else:
self.mean_v_out = self.mean_v_in
def get_weigths(self):
return self.sess.run(self.parameters, {})
def learn(self, s, a_players, coin_game=False, env_rewards=None):
s = s[np.newaxis, :]
if env_rewards is None:
if coin_game:
# TODO remove hardcoded policy_id
actions = {"player_red": a_players[0], "player_blue": a_players[1]}
r_players_rllib_format = self.env._compute_rewards(s, actions)
else:
r_players_rllib_format = self.env._compute_rewards(*a_players)
r_players = convert_from_rllib_env_format(r_players_rllib_format, self.env.players_ids)
else:
r_players = env_rewards
a_players = np.asarray(a_players)
if self.convert_a_to_one_hot:
a_players_one_hot = self.np_action_to_one_hot(a_players)
feed_dict = {self.s: s,
self.a_players: a_players[np.newaxis, ...],
self.r_players: r_players[np.newaxis, :]}
if self.value_fn_variant == 'estimated':
g_log_pi_list = []
for underlying_agent in self.underlying_agents:
idx = underlying_agent.agent_idx
# if "CoinGame" in self.env_name:
g_log_pi_list.append(underlying_agent.calc_g_log_pi(s, a_players_one_hot[idx])[0][0, ...])
# else:
# g_log_pi_list.append(underlying_agent.calc_g_log_pi(s, a_players[idx]))
# if "CoinGame" in self.env_name:
g_log_pi_arr = np.stack(g_log_pi_list, axis=1)
# else:
# g_log_pi_arr = np.reshape(np.asarray(g_log_pi_list), [1, -1])
# print("g_log_pi_arr", g_log_pi_arr.shape)
feed_dict[self.g_log_pi] = g_log_pi_arr
if self.value_fn_variant == 'exact':
p_players_list = []
for underlying_agent in self.underlying_agents:
idx = underlying_agent.agent_idx
if self.convert_a_to_one_hot:
p_players_list.append(underlying_agent.calc_action_probs(s, add_dim=False))
else:
p_players_list.append(underlying_agent.calc_action_probs(s)[0, -1]) # Only 2 actions
# if "CoinGame" in self.env_name:
# v_list.append(underlying_agent.calcul_value(s, add_dim=False))
if self.convert_a_to_one_hot:
p_players_arr = np.stack(p_players_list, axis=1)
else:
p_players_arr = np.reshape(np.asarray(p_players_list), [1, -1])
feed_dict[self.p_players] = p_players_arr
# if "CoinGame" in self.env_name:
# v_players_arr = np.reshape(np.asarray(v_list), [1, -1])
# feed_dict[self.v] = v_players_arr
# if "CoinGame" not in self.env_name:
# feed_dict[self.a_plan] = self.calc_conditional_planning_actions(s)
feed_dict[self.mean_v_in] = self.mean_v_np
feed_dict[self.mean_vp_in] = self.mean_vp_np
(_, action, loss, g_Vp, g_V, cost, extra_loss, l1,
mean_v, vp, v, mean_vp) = self.sess.run([self.train_op, self.vp, self.loss,
self.g_Vp, self.g_V,
self.cost, self.extra_loss, self.l1,
self.mean_v_out, self.vp, self.v,
self.mean_vp_out], feed_dict)
self.mean_v_np = mean_v
self.mean_vp_np = mean_vp
return action, loss, g_Vp, g_V, r_players, cost, extra_loss, l1, mean_v, vp, v, mean_vp
def get_log(self):
return self.log
def np_action_to_one_hot(self, a_players):
a_players_one_hot = np.zeros((len(a_players), self.env.NUM_ACTIONS))
for idx, act in enumerate(a_players.tolist()):
a_players_one_hot[idx, act] = 1
return a_players_one_hot
def choose_action(self, s, a_players):
s = s[np.newaxis, :]
a_players = np.asarray(a_players)
a_plan = self.sess.run(self.vp, {self.s: s,
self.a_players: a_players[np.newaxis, ...]})[0, :]
# self.log.append(self.calc_conditional_planning_actions(s))
return a_plan
def calc_conditional_planning_actions(self, s):
assert "CoinGame" not in self.env_name
# Planning actions in each of the 4 cases: DD, CD, DC, CC
a_plan_DD = self.sess.run(self.action_layer, {self.s: s, self.a_players: np.array([0, 0])[np.newaxis, :]})
a_plan_CD = self.sess.run(self.action_layer, {self.s: s, self.a_players: np.array([1, 0])[np.newaxis, :]})
a_plan_DC = self.sess.run(self.action_layer, {self.s: s, self.a_players: np.array([0, 1])[np.newaxis, :]})
a_plan_CC = self.sess.run(self.action_layer, {self.s: s, self.a_players: np.array([1, 1])[np.newaxis, :]})
l_temp = [a_plan_DD, a_plan_CD, a_plan_DC, a_plan_CC]
if self.max_reward_strength is not None:
l = [2 * self.max_reward_strength * (a_plan_X[0, 0] - 0.5) for a_plan_X in l_temp]
else:
l = [a_plan_X[0, 0] for a_plan_X in l_temp]
if self.with_redistribution:
if self.max_reward_strength is not None:
l2 = [2 * self.max_reward_strength * (a_plan_X[0, 1] - 0.5) for a_plan_X in l_temp]
else:
l2 = [a_plan_X[0, 1] for a_plan_X in l_temp]
l = [0.5 * (elt[0] - elt[1]) for elt in zip(l, l2)]
        return np.transpose(np.reshape(np.asarray(l), [2, 2]))
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
# License: BSD 3-clause
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
def singleplot(data, horizon, title, filepath, xy_labels=None):
"""
Simple plot of a data array.
:param data: Data array
:param horizon: Length of the x-axis
:param title: Figure title
:param xy_labels: Axis labels
:param filepath: Absolute path to save figure on the filesystem
"""
x_axis = np.arange(0.0, horizon, 1.0)
plt.plot(x_axis, data)
plt.title(title)
if xy_labels:
plt.xlabel(xy_labels[0])
plt.ylabel(xy_labels[1])
plt.savefig(filepath)
plt.clf()
# List of (algo_name, data), horizon, file descriptor
def multiplot(lines_tuple, horizon, title, filepath, xy_labels=None):
# Plotting mean reward as a function of time
"""
Plot multiple graphs on the same figure
:param lines_tuple:
List of tuples each containing (name, data)
:param horizon: Length of the x-axis
:param title: Figure title
:param xy_labels: Axis labels
:param filepath: Absolute path to save figure on the filesystem
"""
    x_axis = np.arange(0.0, horizon, 1.0)
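    # Plausible completion (the source file is truncated here), mirroring
    # singleplot for each named series.
    for name, data in lines_tuple:
        plt.plot(x_axis, data, label=name)
    plt.legend()
    plt.title(title)
    if xy_labels:
        plt.xlabel(xy_labels[0])
        plt.ylabel(xy_labels[1])
    plt.savefig(filepath)
    plt.clf()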
# https://pypi.org/project/cykooz.heif/
# Modified by <NAME> from https://shuangz.com/projects/materialgan-sa20/
import sys
import numpy as np
from matplotlib import pyplot as plt
import shutil
import cv2
import os
import pickle
import glob
import obj_points
from PIL import Image
sys.path.insert(1, 'materialGAN/src/')
from util import *
np.set_printoptions(precision=4, suppress=True)
def rotate_needed(img):
img = np.array(img.convert('L'))
arucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_APRILTAG_36h11)
arucoParams = cv2.aruco.DetectorParameters_create()
(corners, ids, rejected) = cv2.aruco.detectMarkers(img, arucoDict, parameters=arucoParams)
# Get the first corner
c = corners[0][0]
c1 = c[1]
c2 = c[0]
c3 = c[3]
c4 = c[2]
if abs((c1[0]+c2[0])-(c3[0]+c4[0])) < abs((c1[1]+c2[1])-(c3[1]+c4[1])):
if (c1[0]+c4[0]) < (c2[0]+c3[0]):
return 0
else:
return 180
else:
if (c1[0]+c2[0]) < (c3[0]+c4[0]):
return -90
else:
return 90
def marker_detection(img, in_dir, idx, flag=True):
img_gray = np.array(img)
arucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_APRILTAG_36h11)
arucoParams = cv2.aruco.DetectorParameters_create()
(corners, ids, rejected) = cv2.aruco.detectMarkers(img_gray, arucoDict, parameters=arucoParams)
tag_id_list = []
corners_list = []
for tag in ids:
tag_id_list.append(tag[0])
for corner in corners:
        # Corner order is kept as detected (0123); the "0123 to 1032" reorder
        # is only applied in rotate_needed above.
coord = corner[0]
new_corner = [coord[0], coord[1], coord[2], coord[3]]
corners_list.append(np.array(new_corner, dtype='float32'))
if flag:
plt.imshow(np.array(img))
for corners in corners_list:
plt.plot(corners[0,0], corners[0,1], 'r.')
plt.plot(corners[1,0], corners[1,1], 'g.')
plt.plot(corners[2,0], corners[2,1], 'b.')
plt.plot(corners[3,0], corners[3,1], 'y.')
plt.title(idx)
plt.savefig(os.path.join(in_dir, 'detect_%02d.jpg' % idx))
plt.close()
imagePoints = np.vstack(corners_list)
print('%d points are detected in image %d' % (imagePoints.shape[0], idx))
return imagePoints, tag_id_list
def preprocess(in_dir, tmp_dir):
print(os.path.join(in_dir, '*.*'))
dir_list = sorted(glob.glob(os.path.join(in_dir, '*.*')))
if len(dir_list) == 0:
print('No inputs given!')
exit()
for idx, dir in enumerate(dir_list):
img = Image.open(dir)
rot = rotate_needed(img)
img = img.rotate(rot, expand=True)
        img.save(os.path.join(tmp_dir, 'orig_%02d.png' % idx), 'PNG')
def calibrate(imgs, in_dir, objectPoints, flag):
N = len(imgs)
W = imgs[0].width
H = imgs[0].height
objectPoints_list = []
imagePoints_list = []
for idx in range(N):
print('Calibrate image: %d' % idx)
img = imgs[idx]
imagePoints_this, tag_list = marker_detection(img, in_dir, idx, flag)
imagePoints_list.append(imagePoints_this)
objectPoints_this = []
for tag in tag_list:
objectPoints_this.append(objectPoints[tag*4:(tag+1)*4, :])
objectPoints_this = np.vstack(objectPoints_this)
objectPoints_list.append(objectPoints_this)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
objectPoints_list, imagePoints_list,
(W,H), None, None)
return mtx, dist, list(rvecs), list(tvecs), objectPoints_list, imagePoints_list
def reorder(arr, list1, list2, list3, list4, list5):
def move_row_to_end(mat, id):
mat = np.append(mat, np.reshape(mat[id,:], (-1,3)), axis=0)
        mat = np.delete(mat, id, axis=0)
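        # Plausible completion (the source file is truncated here): the
        # helper presumably returns the reordered matrix.
        return mat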
import os
import sys
import yaml
import logging
import logging.config
import time
import random
import math
import numpy as np
from numpy import array
import torch
from src import INIT_TYPE, TEST_TYPE, GEN_TYPE
from train_grasp import TrainGrasp
from eval_grasp import EvaluateGrasp
from util.misc import *
from util.mesh import *
def generate_by_chaining_2D(prim_dir_list, new_dir, num_new, keep_concave_part):
"""
Generate new objects by randomly chaining primitives
"""
num_obj_new = 0
height_all = []
prim_dir_obj_all = []
for prim_dir in prim_dir_list:
prim_dir_obj_all += [fnmatch.filter(os.listdir(prim_dir), '*.stl')]
while 1:
# Randomly choose one from each dir, and then chain them; or choose multiple ones from the same dir
chosen_prim_path_list = []
if len(prim_dir_list) > 1:
for dir_ind, prim_dir in enumerate(prim_dir_list):
chosen_prim = random.choice(prim_dir_obj_all[dir_ind])
chosen_prim_path_list += [prim_dir+chosen_prim]
else:
chosen_prim = random.sample(prim_dir_obj_all[0], k=2)
chosen_prim_path_list = [prim_dir_list[0]+prim for prim in chosen_prim]
try:
chained_mesh = chain_mesh_2D(prim_path_list=chosen_prim_path_list)
processed_mesh = process_mesh(chained_mesh,
scale_down=False,
random_scale=True)
ensure_directory_hard(new_dir + str(num_obj_new) + '/')
convex_pieces = save_convex_urdf(processed_mesh,
new_dir,
num_obj_new,
mass=0.1,
keep_concave_part=keep_concave_part)
            height_all += [processed_mesh.bounds[1,2]-processed_mesh.bounds[0,2]]
except:
print('Cannot chain!')
continue
# Skip if too many convex pieces
if len(convex_pieces) > 20:
print('Too concave!')
continue
# Count
num_obj_new += 1
if num_obj_new == num_new:
return height_all
def generate_by_chaining_3D(prim_dir_list, new_dir, num_new, keep_concave_part, target_xy_range, max_z):
"""
Generate new objects by randomly chaining primitives
"""
num_obj_new = 0
height_all = []
prim_dir_obj_all = []
for prim_dir in prim_dir_list:
prim_dir_obj_all += [fnmatch.filter(os.listdir(prim_dir), '*.stl')]
max_num_attempt = num_new*5
num_attempt = 0
while 1:
num_attempt += 1
if num_attempt == max_num_attempt:
raise ValueError('Chaining failed too often')
# Randomly choose one from each dir, and then chain them; or choose multiple ones from the same dir
chosen_prim_path_list = []
if len(prim_dir_list) > 1:
for dir_ind, prim_dir in enumerate(prim_dir_list):
chosen_prim = random.choice(prim_dir_obj_all[dir_ind])
chosen_prim_path_list += [prim_dir+chosen_prim]
else:
chosen_prim = random.sample(prim_dir_obj_all[0], k=2) # chain 2 parts
chosen_prim_path_list = [prim_dir_list[0]+prim for prim in chosen_prim]
# try:
chained_mesh = chain_mesh_3D(prim_path_list=chosen_prim_path_list)
if chained_mesh is None:
print('Cannot chain!')
continue
try:
processed_mesh = process_mesh(chained_mesh,
remove_body=False, #!
scale_down=False,
random_scale=False)
processed_mesh = random_scale_down_mesh(processed_mesh,
target_xy_range=target_xy_range,
max_z=max_z)
ensure_directory_hard(new_dir + str(num_obj_new) + '/')
convex_pieces = save_convex_urdf(processed_mesh,
new_dir,
num_obj_new,
mass=0.1,
keep_concave_part=keep_concave_part)
except:
print('Cannot process!')
continue
# Skip if too many convex pieces
if len(convex_pieces) > 20:
print('Too concave!')
continue
# Count
height_all += [processed_mesh.bounds[1,2]-processed_mesh.bounds[0,2]]
num_obj_new += 1
if num_obj_new == num_new:
return height_all
if __name__ == '__main__':
# from IPython import embed; embed()
if os.cpu_count() > 20: # somehow on server, the default fork method does not work with pytorch, but works fine on desktop
import multiprocessing
multiprocessing.set_start_method('forkserver')
# Read config
yaml_file_name = sys.argv[1]
yaml_path = 'configs/'+yaml_file_name
with open(yaml_path+'.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
# Fix seeds
seed = config['seed']
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = True
# Hardware
cuda_idx = config['cuda_idx']
device = 'cuda:'+str(cuda_idx)
# Misc
num_eval_per_env = config['num_eval_per_env']
# Data
initial_env_dir_list = config['initial_env_dir_list']
num_env_per_initial_dir = config['num_env_per_initial_dir']
test_env_dir_list = config['test_env_dir_list']
num_env_per_test_dir = config['num_env_per_test_dir']
# Domain Randomization (chaining primitives)
dr_method = config['dr_method']
target_xy_range = config['target_xy_range']
max_z = config['max_z']
keep_concave_part = config['keep_concave_part']
num_retrain = config['num_retrain']
num_env_per_gen = config['num_env_per_gen']
num_env_per_retrain = config['num_env_per_retrain']
retrain_sample_recency = config['retrain_sample_recency']
mu_list = config['mu_list']
mu = config['mu']
sigma = config['sigma']
retrain_args = config['retrain_args']
eval_args = config['eval_args']
# Initialize folders
data_parent_dir = config['data_parent_dir']
result_dir = 'result/'+yaml_file_name+'/'
data_dir = data_parent_dir+yaml_file_name+'/'
ensure_directory(result_dir)
ensure_directory(data_dir)
# Initialize dir dict: key is dir_path, value is a tuple of (1) id list and (2) type (0 for initial, 1 for test, 2 for gen)
env_dir_dict = {}
for env_dir in initial_env_dir_list:
height_all = list(np.load(env_dir+'dim.npy')[:num_env_per_initial_dir,2])
env_dir_dict[env_dir] = ([*range(num_env_per_initial_dir)], height_all, INIT_TYPE)
# Save a copy of configuration
with open(result_dir+'config.yaml', 'w') as f:
yaml.dump(config, f, sort_keys=False)
# Initialize evaluating policy (always cpu)
evaluator = EvaluateGrasp(initial_policy_path=None,
mu_list=mu_list, mu=mu, sigma=sigma, **eval_args)
# Initialize training policy
trainer = TrainGrasp(result_dir=result_dir, device=device,
mu=mu, sigma=sigma, **retrain_args)
# Training details to be recorded
train_success_list = [] # using initial objects
test_success_list = []
# Add test dir to dict
for env_dir in test_env_dir_list:
height_all = list(np.load(env_dir+'dim.npy')[:num_env_per_test_dir,2])
env_dir_dict[env_dir] = ([*range(num_env_per_test_dir)], height_all, TEST_TYPE)
# Name of saved training details
train_details_path = None
# Logging
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': True,
})
logging.basicConfig(filename=result_dir+'log.txt',
level=logging.NOTSET,
format='%(process)d-%(levelname)s-%(asctime)s-%(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.info('start')
# Run
for epoch in range(num_retrain):
# Record time for each epoch
epoch_start_time = time.time()
######################### New #########################
if epoch > 0 and dr_method: # no gen at the beginning
# Declare new path
new_gen_dir = data_dir + 'gen_' + str(epoch) + '/'
ensure_directory(new_gen_dir)
print('Generating new...')
if dr_method == 'chain_3D':
dr_func = generate_by_chaining_3D
elif dr_method == 'chain':
dr_func = generate_by_chaining_2D
height_all = dr_func(prim_dir_list=initial_env_dir_list,
new_dir=new_gen_dir,
num_new=num_env_per_gen,
keep_concave_part=keep_concave_part,
target_xy_range=target_xy_range,
max_z=max_z)
new_env_id_list = np.arange(num_env_per_gen)
# Evaluate label of new envs - use mu_list
print('Evaluating newly generated...')
mu_batch = array(evaluator.evaluate(obj_dir=new_gen_dir,
obj_id_list=new_env_id_list,
obj_height_list=height_all,
num_eval=num_eval_per_env)[1], dtype='float')
label_batch = get_label_from_mu(mu_batch, mu_list)
print('Reward of newly generated: ', np.mean(label_batch))
logging.info(f'Reward of newly generated: {np.mean(label_batch)}')
# Add to dir dict
env_dir_dict[new_gen_dir] = (new_env_id_list, height_all, GEN_TYPE)
######################### Retrain #########################
print(f'Retraining policy {epoch}...')
logging.info(f'Retraining policy {epoch}...')
# Pick which envs for training
retrain_env_path_available_all = []
retrain_env_height_available_all = []
retrain_env_weight_all = []
gen_dir_count = 0
for env_dir, (env_id_list, height_list, dir_type) in env_dir_dict.items():
if dir_type != TEST_TYPE:
retrain_env_path_available_all += [env_dir+str(id)+'.urdf' for id in env_id_list]
retrain_env_height_available_all += height_list
if dir_type == INIT_TYPE:
retrain_env_weight_all += [1]*len(env_id_list)
elif dir_type == GEN_TYPE:
retrain_env_weight_all += [math.exp(gen_dir_count*retrain_sample_recency)]*len(env_id_list)
gen_dir_count += 1
retrain_env_weight_all = array(retrain_env_weight_all)/np.sum(array(retrain_env_weight_all))
retrain_env_path_list, chosen_id_list = weighted_sample_without_replacement(retrain_env_path_available_all, retrain_env_weight_all, k=min(num_env_per_retrain, len(retrain_env_path_available_all)))
retrain_env_height_list = list(array(retrain_env_height_available_all)[chosen_id_list])
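# Worked example of the recency weighting above (numbers illustrative):
# with retrain_sample_recency = 0.5 and three generated dirs (oldest first),
# unnormalized per-object weights are 1.0 for initial dirs, then
# exp(0.0)=1.00, exp(0.5)~1.65, exp(1.0)~2.72 for the generated dirs,
# so more recently generated objects are sampled more often when retraining.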
# Use more itrs at 1st retrain
retrain_args_copy = dict(retrain_args) # make a copy
retrain_args_copy.pop('num_step_initial', None)
if epoch == 0:
retrain_args_copy['num_step'] = retrain_args['num_step_initial']
new_policy_path = trainer.run(obj_path_all=retrain_env_path_list,
obj_height_all=retrain_env_height_list,
prefix='epoch_'+str(epoch),
**retrain_args_copy)
logging.info(f'Epoch {epoch} retrain, new policy {new_policy_path}')
# Update evaluator
trainer.load_policy(new_policy_path)
evaluator.load_policy(new_policy_path)
######################### Re-evaluate #########################
# INIT - eval for train_success and label
train_success_batch = np.empty((0), dtype='float')
train_success_dirs = []
for env_dir, (env_id_list, height_list, dir_type) in env_dir_dict.items():
if dir_type == INIT_TYPE:
mu_batch = array(evaluator.evaluate(obj_dir=env_dir,
obj_id_list=env_id_list,
obj_height_list=height_list,
num_eval=num_eval_per_env)[1], dtype='float')
label_batch = get_label_from_mu(mu_batch, mu_list)
train_success_batch = np.concatenate((train_success_batch, label_batch))
train_success_dirs += [np.mean(label_batch)]
from typing import Tuple
import numpy as np
import scipy.linalg as spla
import scipy.stats as spst
from iliad.statistics import normal
from iliad.integrators.states import State, LagrangianLeapfrogState, RiemannianLeapfrogState, SoftAbsLeapfrogState
from odyssey.distribution import Distribution
def mean_and_invcov_and_logdet(
state: RiemannianLeapfrogState,
eps: float,
langevin_type: str
) -> Tuple[np.ndarray, np.ndarray, float, np.ndarray]:
"""Computes the mean, inverse covariance, and log-determinant of the covariance
matrix for the proposal distribution of the Riemannian Metropolis-adjusted
Langevin algorithm.
Args:
state: An object containing the position and momentum variables of the
state in phase space, and possibly previously computed log-posterior,
metrics, and gradients.
eps: The discretization step-size.
langevin_type: Indicates what kind of Langevin stochastic differential
equation to integrate.
Returns:
mean: Mean of the MMALA proposal.
invcov: Inverse covariance of the MMALA proposal.
logdet: The log-determinant of the MMALA covariance matrix.
gamma: Riemannian component of the drift function.
"""
if langevin_type == 'mala':
q = state.position
k = len(q)
glp = state.grad_log_posterior
invcov = np.eye(k) / eps**2
logdet = 2*k*np.log(eps)
mean = q + 0.5*eps**2*glp
gamma = 0.0
return mean, invcov, logdet, gamma
elif langevin_type == 'mmala':
q = state.position
k = len(q)
glp = state.grad_log_posterior
G = state.metric
iG = state.inv_metric
L = state.sqrtm_metric
dG = state.jac_metric
n = 0.5*iG.dot(glp)
gamma = -0.5*np.sum(np.einsum('ik,jk->ij', iG, np.einsum('mkj,mj->jk', dG, iG)), axis=-1)
mean = q + eps**2*(n + gamma)
invcov = G / eps**2
logdet = -2*np.sum(np.log(np.diag(L))) + 2*k*np.log(eps)
return mean, invcov, logdet, gamma
elif langevin_type == 'smala':
q = state.position
k = len(q)
glp = state.grad_log_posterior
G = state.metric
iG = state.inv_metric
L = state.sqrtm_metric
n = 0.5*iG.dot(glp)
gamma = 0.0
mean = q + eps**2*(n + gamma)
invcov = G / eps**2
logdet = -2*np.sum(np.log(np.diag(L))) + 2*k*np.log(eps)
return mean, invcov, logdet, gamma
#!/usr/bin/env python3
import argparse
import sys
from os import system, devnull
from math import log
from math import ceil
import numpy as np
from scipy.signal import argrelextrema
# hetkmers dependencies
from collections import defaultdict
from itertools import combinations
version = '0.2.3dev_rn'
############################
# processing of user input #
############################
class Parser:
def __init__(self):
argparser = argparse.ArgumentParser(
# description='Inference of ploidy and heterozygosity structure using whole genome sequencing data',
usage='''smudgeplot <task> [options] \n
tasks: cutoff Calculate meaningful values for lower/upper kmer histogram cutoff.
hetkmers Calculate unique kmer pairs from a Jellyfish or KMC dump file.
aggregate Retrieve unique k-mer pairs from files containing IDs.
plot Generate 2d histogram; infer ploidy and plot a smudgeplot.
\n\n''')
argparser.add_argument('task', help='Task to execute; for task specific options execute smudgeplot <task> -h')
argparser.add_argument('-v', '--version', action="store_true", default=False, help="print the version and exit")
# print version is a special case
if len(sys.argv) > 1:
if sys.argv[1] in ['-v', '--version']:
self.task = "version"
return
# the following line either prints help and dies, or assigns the chosen task name to self.task
self.task = argparser.parse_args([sys.argv[1]]).task
else:
self.task = ""
# if the task is known (i.e. defined in this file);
if hasattr(self, self.task):
# load arguments of that task
getattr(self, self.task)()
else:
argparser.print_usage()
sys.stderr.write('"' + self.task + '" is not a valid task name\n')
exit(1)
def hetkmers(self):
"""
Calculate unique kmer pairs from a Jellyfish or KMC dump file.
"""
argparser = argparse.ArgumentParser(prog='smudgeplot hetkmers',
description='Calculate unique kmer pairs from a Jellyfish or KMC dump '
'file.')
argparser.add_argument('infile',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin,
help='Alphabetically sorted Jellyfish or KMC dump file (stdin).')
argparser.add_argument('-o',
help='The pattern used to name the output (kmerpairs).',
default='kmerpairs')
argparser.add_argument('--pos',
help='Position in k-mer to look for pairs, 0-based, min(1) - max(k-2)',
dest='pos',
type=int)
self.arguments = argparser.parse_args(sys.argv[2:])
def plot(self):
"""
Generate 2d histogram; infer ploidy and plot a smudgeplot.
"""
argparser = argparse.ArgumentParser(prog='smudgeplot plot',
description='Generate 2d histogram for smudgeplot')
argparser.add_argument('infile',
nargs='?',
help='Name of the input tsv file with coverages (default \"coverages_2.tsv\").')
argparser.add_argument('-o',
help='The pattern used to name the output (smudgeplot).',
default='smudgeplot')
argparser.add_argument('-q',
help='Remove kmer pairs with coverage over the specified quantile; (default none).',
type=float,
default=1)
argparser.add_argument('-L',
help='The lower boundary used when dumping kmers (default min(total_pair_cov) / 2).',
type=int,
default=0)
argparser.add_argument('-n',
help='The expected haploid coverage (default estimated from data).',
type=float,
default=0)
argparser.add_argument('-t',
'--title',
help='name printed at the top of the smudgeplot (default none).',
default='')
# argparser.add_argument('-m',
# '-method',
# help='The algorithm for annotation of smudges (default \'local_aggregation\')',
# default='local_aggregation')
argparser.add_argument('-nbins',
help='The number of nbins used for '
'smudgeplot matrix (nbins x nbins) (default autodetection).',
type=int,
default=0)
argparser.add_argument('-k',
help='The length of the kmer.',
default=21)
# argparser.add_argument('-kmer_file',
# help='Name of the input files containing kmer sequences '
# '(assuming the same order as in the coverage file)',
# default="")
argparser.add_argument('--homozygous',
action="store_true",
default=False,
help="Assume no heterozygosity in the "
"genome - plotting a paralog structure; (default False).")
self.arguments = argparser.parse_args(sys.argv[2:])
def aggregate(self):
"""
Retrieve unique k-mer pairs from files containing IDs.
"""
argparser = argparse.ArgumentParser(prog='smudgeplot aggregate',
description='Retrieve unique k-mer pairs from files containing IDs.')
argparser.add_argument('--infile',
required=True,
type=argparse.FileType('r'),
help='Alphabetically sorted Jellyfish or KMC dump file.')
argparser.add_argument('--index_files',
required=True,
nargs='*',
type=argparse.FileType('r'),
help='Multiple indices files output of smudgeplot.py hetkmers --pos.')
argparser.add_argument('-o',
help='The pattern used to name the output (kmerpairs).', default='kmerpairs')
argparser.add_argument('--cov_only',
action='store_true',
default=False,
help='Only write one file for the coverage of the pairs.')
self.arguments = argparser.parse_args(sys.argv[2:])
def cutoff(self):
"""
Calculate meaningful values for lower/upper kmer histogram cutoff.
"""
argparser = argparse.ArgumentParser(prog='smudgeplot cutoff',
description='Calculate meaningful values for lower/upper kmer '
'histogram cutoff.')
argparser.add_argument('infile',
type=argparse.FileType('r'),
help='Name of the input kmer histogram file (default \"kmer.hist\").')
argparser.add_argument('boundary',
help='Which boundary to compute: L (lower) or U (upper)')
self.arguments = argparser.parse_args(sys.argv[2:])
###############
# task cutoff #
###############
def round_up_nice(x):
digits = ceil(log(x, 10))
if digits <= 1:
multiplier = 10 ** (digits - 1)
else:
multiplier = 10 ** (digits - 2)
return ceil(x / multiplier) * multiplier
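# Worked examples (these follow directly from the definition above):
#   round_up_nice(7) -> 7 (digits=1, multiplier=1)
#   round_up_nice(150) -> 150 (digits=3, multiplier=10)
#   round_up_nice(1234) -> 1300 (digits=4, multiplier=100, ceil(12.34)*100)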
def cutoff(args):
# kmer_hist = open("data/Mflo2/kmer.hist","r")
kmer_hist = args.infile
hist = np.array([int(line.split()[1]) for line in kmer_hist])
if args.boundary == "L":
local_minima = argrelextrema(hist, np.less)[0][0]
L = max(10, int(round(local_minima * 1.25)))
sys.stdout.write(str(L))
else:
# take the 99.8% quantile of kmers that occur more than once in the read set
hist_rel_cumsum = np.cumsum(hist[1:]) / np.sum(hist[1:])
U = round_up_nice(np.argmax(hist_rel_cumsum > 0.998))
sys.stdout.write(str(U))
# Copyright (c) 2017-2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali.tfrecord as tfrec
import nvidia.dali as dali
from nvidia.dali.backend_impl import TensorListGPU
from timeit import default_timer as timer
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose
import os
import random
from PIL import Image
from test_utils import check_batch
from test_utils import compare_pipelines
from test_utils import get_dali_extra_path
test_data_root = get_dali_extra_path()
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
c2lmdb_db_folder = os.path.join(test_data_root, 'db', 'c2lmdb')
recordio_db_folder = os.path.join(test_data_root, 'db', 'recordio')
tfrecord_db_folder = os.path.join(test_data_root, 'db', 'tfrecord')
jpeg_folder = os.path.join(test_data_root, 'db', 'single', 'jpeg')
coco_image_folder = os.path.join(test_data_root, 'db', 'coco', 'images')
coco_annotation_file = os.path.join(test_data_root, 'db', 'coco', 'instances.json')
test_data_video = os.path.join(test_data_root, 'db', 'optical_flow', 'sintel_trailer')
def test_tensor_multiple_uses():
batch_size = 128
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus):
super(HybridPipe, self).__init__(batch_size,
num_threads,
device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
self.res = ops.Resize(device="cpu", resize_x=224, resize_y=224)
self.dump_cpu = ops.DumpImage(device = "cpu", suffix = "cpu")
self.dump_gpu = ops.DumpImage(device = "gpu", suffix = "gpu")
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
images = self.res(images)
images_cpu = self.dump_cpu(images)
images_gpu = self.dump_gpu(images.gpu())
return (images, images_cpu, images_gpu)
pipe = HybridPipe(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1)
pipe.build()
out = pipe.run()
assert(out[0].is_dense_tensor())
assert(out[1].is_dense_tensor())
assert(out[2].is_dense_tensor())
assert(out[0].as_tensor().shape() == out[1].as_tensor().shape())
assert(out[0].as_tensor().shape() == out[2].as_tensor().shape())
a_raw = out[0]
a_cpu = out[1]
a_gpu = out[2].as_cpu()
for i in range(batch_size):
t_raw = a_raw.at(i)
t_cpu = a_cpu.at(i)
assert(np.sum(np.abs(t_cpu - t_raw)) == 0)
t_cpu = a_cpu.at(i)
t_gpu = a_gpu.at(i)
assert(np.sum(np.abs(t_cpu - t_gpu)) == 0)
def test_multiple_input_sets():
batch_size = 32
file_root = os.path.join(test_data_root, 'db', 'coco', 'images')
annotations_file = os.path.join(test_data_root, 'db', 'coco', 'instances.json')
class MISPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus):
super(MISPipe, self).__init__(batch_size, num_threads, device_id, num_gpus)
# Reading COCO dataset
self.input = ops.COCOReader(
file_root=file_root,
annotations_file=annotations_file,
shard_id=device_id,
num_shards=num_gpus,
ratio=True,
ltrb=True,
random_shuffle=False)
self.decode_cpu = ops.ImageDecoder(device="cpu", output_type=types.RGB)
self.decode_crop = ops.ImageDecoderSlice(device="cpu", output_type=types.RGB)
self.ssd_crop = ops.SSDRandomCrop(device="cpu", num_attempts=1, seed=0)
default_boxes = [0.0, 0.0, 1.0, 1.0]
self.box_encoder_cpu = ops.BoxEncoder(device="cpu", criteria=0.5, anchors=default_boxes)
def define_graph(self):
# Do separate augmentations
inputs0, boxes0, labels0 = self.input(name="Reader0")
image0 = self.decode_cpu(inputs0)
image_ssd0, boxes_ssd0, labels_ssd0 = self.ssd_crop(image0, boxes0, labels0)
inputs1, boxes1, labels1 = self.input(name="Reader1")
image1 = self.decode_cpu(inputs1)
image_ssd1, boxes_ssd1, labels_ssd1 = self.ssd_crop(image1, boxes1, labels1)
encoded_boxes0, encoded_labels0 = self.box_encoder_cpu(boxes_ssd0, labels_ssd0)
encoded_boxes1, encoded_labels1 = self.box_encoder_cpu(boxes_ssd1, labels_ssd1)
# Pack into Multiple Input Sets and gather multiple output lists
boxes = [boxes_ssd0, boxes_ssd1]
labels = [labels_ssd0, labels_ssd1]
enc_boxes0, enc_labels0 = self.box_encoder_cpu(boxes, labels)
# Test one list with one _EdgeReference
enc_boxes1, enc_labels1 = self.box_encoder_cpu(boxes, labels_ssd0)
# Return everything (only _EdgeReference allowed)
return (encoded_boxes0, encoded_labels0, encoded_boxes1, encoded_labels1,
enc_boxes0[0], enc_labels0[0], enc_boxes0[1], enc_labels0[1],
enc_boxes1[0], enc_labels1[0], enc_boxes1[1], enc_labels1[1])
pipe = MISPipe(batch_size = batch_size, num_threads = 1, device_id = 0, num_gpus = 1)
pipe.build()
out = pipe.run()
for i in range(batch_size):
for j in range(0, len(out) - 2, 2):
# All boxes should be the same
assert(np.array_equal(out[j].at(i), out[j + 2].at(i)))
# All labels should be the same
assert(np.array_equal(out[j + 1].at(i), out[j + 3].at(i)))
def test_pipeline_separated_exec_setup():
batch_size = 128
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus, prefetch_queue_depth):
super(HybridPipe, self).__init__(batch_size,
num_threads,
device_id, prefetch_queue_depth = prefetch_queue_depth)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
self.res = ops.Resize(device="cpu", resize_x=224, resize_y=224)
self.dump_cpu = ops.DumpImage(device = "cpu", suffix = "cpu")
self.dump_gpu = ops.DumpImage(device = "gpu", suffix = "gpu")
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
images = self.res(images)
images_cpu = self.dump_cpu(images)
images_gpu = self.dump_gpu(images.gpu())
return (images, images_cpu, images_gpu)
pipe = HybridPipe(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1,
prefetch_queue_depth = {"cpu_size": 5, "gpu_size": 3})
pipe.build()
out = pipe.run()
assert(out[0].is_dense_tensor())
assert(out[1].is_dense_tensor())
assert(out[2].is_dense_tensor())
assert(out[0].as_tensor().shape() == out[1].as_tensor().shape())
assert(out[0].as_tensor().shape() == out[2].as_tensor().shape())
a_raw = out[0]
a_cpu = out[1]
a_gpu = out[2].as_cpu()
for i in range(batch_size):
t_raw = a_raw.at(i)
t_cpu = a_cpu.at(i)
assert(np.sum(np.abs(t_cpu - t_raw)) == 0)
t_cpu = a_cpu.at(i)
t_gpu = a_gpu.at(i)
assert(np.sum(np.abs(t_cpu - t_gpu)) == 0)
def test_pipeline_simple_sync_no_prefetch():
batch_size = 16
n_iters = 12
class HybridPipe(Pipeline):
def __init__(self, batch_size):
super(HybridPipe, self).__init__(batch_size,
num_threads=1,
device_id=0, prefetch_queue_depth=1,
exec_async=False, exec_pipelined=False)
self.input = ops.CaffeReader(path = caffe_db_folder)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
self.dump_gpu = ops.DumpImage(device = "gpu", suffix = "gpu")
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
images_gpu = self.dump_gpu(images.gpu())
return (images, images_gpu)
pipe = HybridPipe(batch_size=batch_size)
pipe.build()
for _ in range(n_iters):
out = pipe.run()
def test_use_twice():
batch_size = 128
class Pipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus):
super(Pipe, self).__init__(batch_size, num_threads, device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
self.res = ops.Resize(device="cpu", resize_x=224, resize_y=224)
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
images0 = self.res(images)
images1 = self.res(images)
return (images0, images1)
pipe = Pipe(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1)
pipe.build()
out = pipe.run()
assert(out[0].is_dense_tensor())
assert(out[1].is_dense_tensor())
assert(out[0].as_tensor().shape() == out[1].as_tensor().shape())
for i in range(batch_size):
assert(np.array_equal(out[0].at(i), out[1].at(i)))
def test_cropmirrornormalize_layout():
batch_size = 128
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus):
super(HybridPipe, self).__init__(batch_size,
num_threads,
device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
self.cmnp_nhwc = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NHWC,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.cmnp_nchw = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NCHW,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
output_nhwc = self.cmnp_nhwc(images.gpu())
output_nchw = self.cmnp_nchw(images.gpu())
return (output_nchw, output_nhwc)
pipe = HybridPipe(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1)
pipe.build()
out = pipe.run()
assert(out[0].is_dense_tensor())
assert(out[1].is_dense_tensor())
shape_nchw = out[0].as_tensor().shape()
shape_nhwc = out[1].as_tensor().shape()
assert(shape_nchw[0] == shape_nhwc[0])
a_nchw = out[0].as_cpu()
a_nhwc = out[1].as_cpu()
for i in range(batch_size):
t_nchw = a_nchw.at(i)
t_nhwc = a_nhwc.at(i)
assert(t_nchw.shape == (3,224,224))
assert(t_nhwc.shape == (224,224,3))
assert(np.sum(np.abs(np.transpose(t_nchw, (1,2,0)) - t_nhwc)) == 0)
def test_cropmirrornormalize_pad():
batch_size = 128
class HybridPipe(Pipeline):
def __init__(self, layout, batch_size, num_threads, device_id, num_gpus):
super(HybridPipe, self).__init__(batch_size,
num_threads,
device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
self.cmnp_pad = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = layout,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.],
pad_output = True)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = layout,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.],
pad_output = False)
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
output_pad = self.cmnp_pad(images.gpu())
output = self.cmnp(images.gpu())
return (output, output_pad)
for layout in [types.NCHW, types.NHWC]:
pipe = HybridPipe(layout, batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1)
pipe.build()
out = pipe.run()
assert(out[0].is_dense_tensor())
assert(out[1].is_dense_tensor())
shape = out[0].as_tensor().shape()
shape_pad = out[1].as_tensor().shape()
assert(shape[0] == shape_pad[0])
a = out[0].as_cpu()
a_pad = out[1].as_cpu()
for i in range(batch_size):
t = a.at(i)
t_pad = a_pad.at(i)
if (layout == types.NCHW):
assert(t.shape == (3,224,224))
assert(t_pad.shape == (4,224,224))
assert(np.sum(np.abs(t - t_pad[:3,:,:])) == 0)
assert(np.sum(np.abs(t_pad[3,:,:])) == 0)
else:
assert(t.shape == (224,224,3))
assert(t_pad.shape == (224,224,4))
assert(np.sum(np.abs(t - t_pad[:,:,:3])) == 0)
assert(np.sum(np.abs(t_pad[:,:,3])) == 0)
def test_cropmirrornormalize_multiple_inputs():
batch_size = 13
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads=1, device_id=0, num_gpus=1, device="cpu"):
super(HybridPipe, self).__init__(batch_size,
num_threads,
device_id)
self.device = device
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
self.decode2 = ops.ImageDecoder(device = "cpu", output_type = types.RGB)
self.cmnp = ops.CropMirrorNormalize(device = device,
output_dtype = types.FLOAT,
output_layout = types.NHWC,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
def define_graph(self):
inputs, labels = self.input(name="Reader")
images = self.decode(inputs)
images2 = self.decode2(inputs)
images_device = images if self.device == "cpu" else images.gpu()
images2_device = images2 if self.device == "cpu" else images2.gpu()
output1, output2 = self.cmnp([images_device, images2_device])
output3 = self.cmnp([images_device])
output4 = self.cmnp([images2_device])
return (output1, output2, output3, output4)
for device in ["cpu", "gpu"]:
pipe = HybridPipe(batch_size=batch_size, device=device)
pipe.build()
for _ in range(5):
out1, out2, out3, out4 = pipe.run()
outs = [out.as_cpu() if device == 'gpu' else out for out in [out1, out2, out3, out4] ]
check_batch(outs[0], outs[1], batch_size)
check_batch(outs[0], outs[2], batch_size)
check_batch(outs[1], outs[3], batch_size)
def test_seed():
batch_size = 64
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HybridPipe, self).__init__(batch_size,
num_threads,
device_id,
seed = 12)
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle = True)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.coin = ops.CoinFlip()
self.uniform = ops.Uniform(range = (0.0,1.0))
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input()
images = self.decode(self.jpegs)
mirror = self.coin()
output = self.cmnp(images, mirror = mirror, crop_pos_x = self.uniform(), crop_pos_y = self.uniform())
return (output, self.labels)
n = 30
for i in range(50):
pipe = HybridPipe(batch_size=batch_size,
num_threads=2,
device_id = 0)
pipe.build()
pipe_out = pipe.run()
pipe_out_cpu = pipe_out[0].as_cpu()
img_chw_test = pipe_out_cpu.at(n)
if i == 0:
img_chw = img_chw_test
assert(np.sum(np.abs(img_chw - img_chw_test)) == 0)
def test_as_array():
batch_size = 64
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HybridPipe, self).__init__(batch_size,
num_threads,
device_id,
seed = 12)
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle = True)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.coin = ops.CoinFlip()
self.uniform = ops.Uniform(range = (0.0,1.0))
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input()
images = self.decode(self.jpegs)
mirror = self.coin()
output = self.cmnp(images, mirror = mirror, crop_pos_x = self.uniform(), crop_pos_y = self.uniform())
return (output, self.labels)
n = 30
for i in range(50):
pipe = HybridPipe(batch_size=batch_size,
num_threads=2,
device_id = 0)
pipe.build()
pipe_out = pipe.run()
pipe_out_cpu = pipe_out[0].as_cpu()
img_chw_test = pipe_out_cpu.as_array()
if i == 0:
img_chw = img_chw_test
assert(img_chw_test.shape == (batch_size,3,224,224))
assert(np.sum(np.abs(img_chw - img_chw_test)) == 0)
def test_seed_serialize():
batch_size = 64
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HybridPipe, self).__init__(batch_size,
num_threads,
device_id,
seed = 12)
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle = True)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.coin = ops.CoinFlip()
self.uniform = ops.Uniform(range = (0.0,1.0))
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input()
images = self.decode(self.jpegs)
mirror = self.coin()
output = self.cmnp(images, mirror = mirror, crop_pos_x = self.uniform(), crop_pos_y = self.uniform())
return (output, self.labels)
n = 30
orig_pipe = HybridPipe(batch_size=batch_size,
num_threads=2,
device_id = 0)
s = orig_pipe.serialize()
for i in range(50):
pipe = Pipeline()
pipe.deserialize_and_build(s)
pipe_out = pipe.run()
pipe_out_cpu = pipe_out[0].as_cpu()
img_chw_test = pipe_out_cpu.at(n)
if i == 0:
img_chw = img_chw_test
assert(np.sum(np.abs(img_chw - img_chw_test)) == 0)
def test_make_continuous_serialize():
batch_size = 32
class COCOPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(COCOPipeline, self).__init__(batch_size, num_threads, device_id)
self.input = ops.COCOReader(file_root=coco_image_folder, annotations_file=coco_annotation_file, ratio=True, ltrb=True)
self.decode = ops.ImageDecoder(device="mixed")
self.crop = ops.RandomBBoxCrop(device="cpu", seed = 12)
self.slice = ops.Slice(device="gpu")
def define_graph(self):
inputs, bboxes, labels = self.input()
images = self.decode(inputs)
crop_begin, crop_size, bboxes, labels = self.crop(bboxes, labels)
images = self.slice(images, crop_begin, crop_size)
return images
pipe = COCOPipeline(batch_size=batch_size, num_threads=2, device_id=0)
serialized_pipeline = pipe.serialize()
del(pipe)
new_pipe = Pipeline(batch_size=batch_size, num_threads=2, device_id=0)
new_pipe.deserialize_and_build(serialized_pipeline)
def test_make_continuous_serialize_and_use():
batch_size = 2
class COCOPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(COCOPipeline, self).__init__(batch_size, num_threads, device_id)
self.input = ops.COCOReader(file_root=coco_image_folder, annotations_file=coco_annotation_file, ratio=True, ltrb=True)
self.decode = ops.ImageDecoder(device="mixed")
self.crop = ops.RandomBBoxCrop(device="cpu", seed = 25)
self.slice = ops.Slice(device="gpu")
def define_graph(self):
inputs, bboxes, labels = self.input()
images = self.decode(inputs)
crop_begin, crop_size, bboxes, labels = self.crop(bboxes, labels)
images = self.slice(images, crop_begin, crop_size)
return images
pipe = COCOPipeline(batch_size=batch_size, num_threads=2, device_id=0)
serialized_pipeline = pipe.serialize()
new_pipe = Pipeline(batch_size=batch_size, num_threads=2, device_id=0)
new_pipe.deserialize_and_build(serialized_pipeline)
compare_pipelines(pipe, new_pipe, batch_size, 50)
def test_warpaffine():
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HybridPipe, self).__init__(batch_size, num_threads, device_id, seed = 12)
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle = True)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NHWC,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.affine = ops.WarpAffine(device = "gpu",
matrix = [1.0, 0.8, -0.8*112, 0.0, 1.2, -0.2*112],
fill_value = 128,
interp_type = types.INTERP_LINEAR)
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input()
images = self.decode(self.jpegs)
outputs = self.cmnp([images, images])
outputs[1] = self.affine(outputs[1])
return [self.labels] + outputs
pipe = HybridPipe(batch_size=128, num_threads=2, device_id = 0)
pipe.build()
pipe_out = pipe.run()
import cv2
orig_cpu = pipe_out[1].as_cpu()
for i in range(128):
orig = orig_cpu.at(i)
# apply 0.5 correction for opencv's not-so-good notion of pixel centers
M = np.array([1.0, 0.8, -0.8*(112 - 0.5), 0.0, 1.2, -0.2*(112 - 0.5)]).reshape((2,3))
out = cv2.warpAffine(orig, M, (224,224), borderMode=cv2.BORDER_CONSTANT, borderValue = (128, 128, 128),
flags = (cv2.WARP_INVERSE_MAP + cv2.INTER_LINEAR))
dali_output = pipe_out[2].as_cpu().at(i)
maxdif = np.max(cv2.absdiff(out, dali_output)/255.0)
assert(maxdif < 0.025)
def test_type_conversion():
class HybridPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(HybridPipe, self).__init__(batch_size, num_threads, device_id, seed = 12)
self.input = ops.CaffeReader(path = caffe_db_folder, random_shuffle = True)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.cmnp_all = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NHWC,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.cmnp_int = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NHWC,
crop = (224, 224),
image_type = types.RGB,
mean = [128, 128, 128],
std = [1., 1, 1]) # leave one argument as float to test whether mixing types works
self.cmnp_1arg = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
output_layout = types.NHWC,
crop = (224, 224),
image_type = types.RGB,
mean = 128,
std = 1)
self.uniform = ops.Uniform(range = (0,1))
def define_graph(self):
self.jpegs, self.labels = self.input()
images = self.decode(self.jpegs)
outputs = [ None for i in range(3)]
crop_pos_x = self.uniform()
crop_pos_y = self.uniform()
outputs[0] = self.cmnp_all(images,
crop_pos_x = crop_pos_x,
crop_pos_y = crop_pos_y)
outputs[1] = self.cmnp_int(images,
crop_pos_x = crop_pos_x,
crop_pos_y = crop_pos_y)
outputs[2] = self.cmnp_1arg(images,
crop_pos_x = crop_pos_x,
crop_pos_y = crop_pos_y)
return [self.labels] + outputs
pipe = HybridPipe(batch_size=128, num_threads=2, device_id = 0)
pipe.build()
for i in range(10):
pipe_out = pipe.run()
orig_cpu = pipe_out[1].as_cpu().as_tensor()
int_cpu = pipe_out[2].as_cpu().as_tensor()
arg1_cpu = pipe_out[3].as_cpu().as_tensor()
assert_array_equal(orig_cpu, int_cpu)
assert_array_equal(orig_cpu, arg1_cpu)
def test_crop():
class CMNvsCropPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(CMNvsCropPipe, self).__init__(batch_size, num_threads, device_id, seed = 12)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = 1)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.cmn = ops.CropMirrorNormalize(device = "gpu",
output_layout = types.NHWC,
output_dtype = types.FLOAT,
crop = (224, 224),
image_type = types.RGB,
mean = [0., 0., 0.],
std = [1., 1., 1.])
self.crop = ops.Crop(device = "gpu",
crop = (224, 224),
image_type = types.RGB)
self.uniform = ops.Uniform(range = (0.0, 1.0))
self.cast = ops.Cast(device = "gpu",
dtype = types.INT32)
def define_graph(self):
inputs, labels = self.input()
images = self.decode(inputs)
crop_x = self.uniform()
crop_y = self.uniform()
output_cmn = self.cmn(images, crop_pos_x = crop_x, crop_pos_y = crop_y)
output_crop = self.crop(images, crop_pos_x = crop_x, crop_pos_y = crop_y)
output_cmn = self.cast(output_cmn)
output_crop = self.cast(output_crop)
return (output_cmn, output_crop, labels.gpu())
batch_size = 8
iterations = 8
pipe = CMNvsCropPipe(batch_size=batch_size, num_threads=2, device_id = 0)
pipe.build()
for _ in range(iterations):
pipe_out = pipe.run()
cmn_img_batch_cpu = pipe_out[0].as_cpu()
crop_img_batch_cpu = pipe_out[1].as_cpu()
for b in range(batch_size):
img_cmn = cmn_img_batch_cpu.at(b)
img_crop = crop_img_batch_cpu.at(b)
assert(np.array_equal(img_cmn, img_crop))
def test_transpose():
class TransposePipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id):
super(TransposePipe, self).__init__(batch_size, num_threads, device_id, seed=12)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = 1)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.crop = ops.Crop(device = "gpu",
crop = (224, 224),
image_type = types.RGB)
self.transpose = ops.Transpose(device="gpu", perm=[2, 0, 1])
def define_graph(self):
imgs, labels = self.input()
output = self.decode(imgs)
cropped = self.crop(output)
transposed = self.transpose(cropped)
return (cropped, transposed, labels.gpu())
batch_size = 8
iterations = 8
pipe = TransposePipe(batch_size=batch_size, num_threads=2, device_id = 0)
pipe.build()
for _ in range(iterations):
pipe_out = pipe.run()
images = pipe_out[0].as_cpu().as_array()
images_transposed = pipe_out[1].as_cpu().as_array()
for b in range(batch_size):
np_transposed = images[b].transpose((2, 0, 1))
np_transposed = np.ascontiguousarray(np_transposed)
assert(np.array_equal(np_transposed, images_transposed[b]))
def test_equal_ImageDecoderCrop_ImageDecoder():
"""
Comparing results of pipeline: (ImageDecoder -> Crop), with the same operation performed by fused operator
"""
batch_size = 128
class NonFusedPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus):
super(NonFusedPipeline, self).__init__(batch_size,
num_threads,
device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.pos_rng_x = ops.Uniform(range = (0.0, 1.0), seed=1234)
self.pos_rng_y = ops.Uniform(range = (0.0, 1.0), seed=5678)
self.crop = ops.Crop(device="gpu", crop =(224,224))
def define_graph(self):
self.jpegs, self.labels = self.input()
pos_x = self.pos_rng_x()
pos_y = self.pos_rng_y()
images = self.decode(self.jpegs)
crop = self.crop(images, crop_pos_x=pos_x, crop_pos_y=pos_y)
return (crop, self.labels)
class FusedPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus):
super(FusedPipeline, self).__init__(batch_size,
num_threads,
device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.pos_rng_x = ops.Uniform(range = (0.0, 1.0), seed=1234)
self.pos_rng_y = ops.Uniform(range = (0.0, 1.0), seed=5678)
self.decode = ops.ImageDecoderCrop(device = 'mixed', output_type = types.RGB, crop = (224, 224))
def define_graph(self):
self.jpegs, self.labels = self.input()
pos_x = self.pos_rng_x()
pos_y = self.pos_rng_y()
images = self.decode(self.jpegs, crop_pos_x=pos_x, crop_pos_y=pos_y)
return (images, self.labels)
nonfused_pipe = NonFusedPipeline(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1)
nonfused_pipe.build()
nonfused_pipe_out = nonfused_pipe.run()
fused_pipe = FusedPipeline(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1)
fused_pipe.build()
fused_pipe_out = fused_pipe.run()
for i in range(batch_size):
nonfused_pipe_out_cpu = nonfused_pipe_out[0].as_cpu()
fused_pipe_out_cpu = fused_pipe_out[0].as_cpu()
assert(np.sum(np.abs(nonfused_pipe_out_cpu.at(i)-fused_pipe_out_cpu.at(i)))==0)
def test_equal_ImageDecoderRandomCrop_ImageDecoder():
"""
Comparing results of pipeline: (ImageDecoder -> RandomCrop), with the same operation performed by fused operator
"""
batch_size = 128
class NonFusedPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus, seed):
super(NonFusedPipeline, self).__init__(batch_size, num_threads, device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus, seed = seed)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.res = ops.RandomResizedCrop(device="gpu", size =(224,224), seed=seed)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.coin = ops.CoinFlip(seed = seed)
def define_graph(self):
self.jpegs, self.labels = self.input()
images = self.decode(self.jpegs)
resized_images = self.res(images)
mirror = self.coin()
output = self.cmnp(resized_images, mirror = mirror)
return (output, resized_images, self.labels)
class FusedPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus, seed):
super(FusedPipeline, self).__init__(batch_size, num_threads, device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus, seed = seed)
self.decode = ops.ImageDecoderRandomCrop(device = "mixed", output_type = types.RGB, seed=seed)
self.res = ops.Resize(device="gpu", resize_x=224, resize_y=224)
self.cmnp = ops.CropMirrorNormalize(device = "gpu",
output_dtype = types.FLOAT,
crop = (224, 224),
image_type = types.RGB,
mean = [128., 128., 128.],
std = [1., 1., 1.])
self.coin = ops.CoinFlip(seed = seed)
def define_graph(self):
self.jpegs, self.labels = self.input()
images = self.decode(self.jpegs)
resized_images = self.res(images)
mirror = self.coin()
output = self.cmnp(resized_images, mirror = mirror)
return (output, resized_images, self.labels)
random_seed = 123456
nonfused_pipe = NonFusedPipeline(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1, seed = random_seed)
nonfused_pipe.build()
nonfused_pipe_out = nonfused_pipe.run()
fused_pipe = FusedPipeline(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1, seed = random_seed)
fused_pipe.build()
fused_pipe_out = fused_pipe.run()
nonfused_pipe_out_cpu = nonfused_pipe_out[0].as_cpu()
fused_pipe_out_cpu = fused_pipe_out[0].as_cpu()
for i in range(batch_size):
assert(np.mean(np.abs(nonfused_pipe_out_cpu.at(i)-fused_pipe_out_cpu.at(i))) < 0.5)
class ExternalInputIterator(object):
def __init__(self, batch_size):
self.batch_size = batch_size
def __iter__(self):
self.i = 0
self.n = self.batch_size
return self
def __next__(self):
pos = []
size = []
for _ in range(self.batch_size):
pos.append(np.asarray([0.4, 0.2], dtype=np.float32))
size.append(np.asarray([0.3, 0.5], dtype=np.float32))
self.i = (self.i + 1) % self.n
return (pos, size)
next = __next__
class LazyPipeline(Pipeline):
def __init__(self, batch_size, db_folder, lazy_type, num_threads=1, device_id=0, num_gpus=1):
super(LazyPipeline, self).__init__(batch_size,
num_threads,
device_id)
self.input = ops.CaffeReader(path = db_folder, shard_id = device_id, num_shards = num_gpus, lazy_init = lazy_type)
self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
self.pos_rng_x = ops.Uniform(range = (0.0, 1.0), seed=1234)
self.pos_rng_y = ops.Uniform(range = (0.0, 1.0), seed=5678)
self.crop = ops.Crop(device="gpu", crop =(224,224))
def define_graph(self):
self.jpegs, self.labels = self.input()
pos_x = self.pos_rng_x()
pos_y = self.pos_rng_y()
images = self.decode(self.jpegs)
crop = self.crop(images, crop_pos_x=pos_x, crop_pos_y=pos_y)
return (crop, self.labels)
def test_lazy_init_empty_data_path():
empty_db_folder="/data/empty"
batch_size = 128
nonlazy_pipe = LazyPipeline(batch_size, empty_db_folder, lazy_type=False)
try:
nonlazy_pipe.build()
assert(False)
except RuntimeError:
assert(True)
lazy_pipe = LazyPipeline(batch_size, empty_db_folder, lazy_type=True)
try:
lazy_pipe.build()
assert(True)
except BaseException:
assert(False)
def test_lazy_init():
"""
Comparing results of pipeline: lazy_init false and lazy_init true with empty folder and real folder
"""
batch_size = 128
compare_pipelines(LazyPipeline(batch_size, caffe_db_folder, lazy_type=False),
LazyPipeline(batch_size, caffe_db_folder, lazy_type=True),
batch_size=batch_size, N_iterations=20)
def test_equal_ImageDecoderSlice_ImageDecoder():
"""
Comparing results of pipeline: (ImageDecoder -> Slice), with the same operation performed by fused operator
"""
batch_size = 128
eii = ExternalInputIterator(128)
pos_size_iter = iter(eii)
class NonFusedPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus):
super(NonFusedPipeline, self).__init__(batch_size,
num_threads,
device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
self.input_crop = ops.ExternalSource()
self.decode = ops.ImageDecoder(device='mixed', output_type=types.RGB)
self.slice = ops.Slice(device = 'gpu')
def define_graph(self):
jpegs, labels = self.input()
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
images = self.decode(jpegs)
slice = self.slice(images, self.crop_pos, self.crop_size)
return (slice, labels)
def iter_setup(self):
(crop_pos, crop_size) = pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
class FusedPipeline(Pipeline):
def __init__(self, batch_size, num_threads, device_id, num_gpus):
super(FusedPipeline, self).__init__(batch_size,
num_threads,
device_id)
self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.input_crop_pos = ops.ExternalSource()
self.input_crop_size = ops.ExternalSource()
self.input_crop = ops.ExternalSource()
self.decode = ops.ImageDecoderSlice(device = 'mixed', output_type = types.RGB)
def define_graph(self):
jpegs, labels = self.input()
self.crop_pos = self.input_crop_pos()
self.crop_size = self.input_crop_size()
images = self.decode(jpegs, self.crop_pos, self.crop_size)
return (images, labels)
def iter_setup(self):
(crop_pos, crop_size) = pos_size_iter.next()
self.feed_input(self.crop_pos, crop_pos)
self.feed_input(self.crop_size, crop_size)
nonfused_pipe = NonFusedPipeline(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1)
nonfused_pipe.build()
nonfused_pipe_out = nonfused_pipe.run()
fused_pipe = FusedPipeline(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1)
fused_pipe.build()
fused_pipe_out = fused_pipe.run()
for i in range(batch_size):
nonfused_pipe_out_cpu = nonfused_pipe_out[0].as_cpu()
fused_pipe_out_cpu = fused_pipe_out[0].as_cpu()
assert(np.sum(np.abs(nonfused_pipe_out_cpu.at(i)-fused_pipe_out_cpu.at(i)))==0)
def test_iter_setup():
class TestIterator():
def __init__(self, n):
self.n = n
def __iter__(self):
self.i = 0
return self
def __next__(self):
batch = []
if self.i < self.n:
batch.append(np.arange(0, 1, dtype=np.float))
self.i += 1
return batch
else:
self.i = 0
raise StopIteration
next = __next__
class IterSetupPipeline(Pipeline):
def __init__(self, iterator, num_threads, device_id):
super(IterSetupPipeline, self).__init__(1, num_threads, device_id)
self.input = ops.ExternalSource()
self.iterator = iterator
def define_graph(self):
self.batch = self.input()
return self.batch
def iter_setup(self):
batch = next(self.iterator)
self.feed_input(self.batch, batch)
iter_num = 5
iterator = iter(TestIterator(iter_num))
i = 0
while True:
try:
batch = next(iterator)
i += 1
except StopIteration:
break
assert(iter_num == i)
iterator = iter(TestIterator(iter_num))
pipe = IterSetupPipeline(iterator, 3, 0)
pipe.build()
i = 0
while True:
try:
pipe_out = pipe.run()
i += 1
except StopIteration:
break
assert(iter_num == i)
pipe.reset()
i = 0
while True:
try:
pipe_out = pipe.run()
i += 1
except StopIteration:
break
assert(iter_num == i)
def test_external_source():
class TestIterator():
def __init__(self, n):
self.n = n
def __iter__(self):
self.i = 0
return self
def __next__(self):
batch_1 = []
batch_2 = []
if self.i < self.n:
batch_1.append(np.arange(0, 1, dtype=np.float))
"""Implementation of different methods that integrate planning with learning"""
import argparse
import numpy as np
import plot as plt
import random
from abc import ABC
from collections import defaultdict, namedtuple
from env import Action, Easy21, State
from heapq import heapify, heappop, heappush
from policy import EpsilonGreedyPolicy, Policy, RandomPolicy
from tqdm import tqdm
from typing import List
# For reproducibility
random.seed(0)
Trajectory = namedtuple("Trajectory", ["state", "action", "reward"])
class Planning(ABC):
"""A base class defining a planning algorithm"""
def __init__(self):
self._env = Easy21(seed=24)
self.Q = np.zeros((*self._env.state_space, self._env.action_space))
# Model of the world represented by approximations of the transition and reward functions
# We initialize the transition function with a very small value greater than 0 to avoid
# division by zero when computing transition probabilities for trajectories that haven't been encountered
self.T = np.full((*self._env.state_space, self._env.action_space, *self._env.state_space), 0.00001)
self.R = np.zeros((*self._env.state_space, self._env.action_space))
def _update_model(self, s, a, r, s_prime, done):
if not done:
# We only update the transition model for non-terminal states since the terminal state
# is most likely an "invalid" state for this environment, e.g. a player's sum over 21
self.T[s.dealer_first_card, s.player_sum, a, s_prime.dealer_first_card, s_prime.player_sum] += 1
# We update the model of the reward by learning it similarly to the action values
# This is not specified in Sutton & Barto's book but I learned it during my Master's
self.R[s.dealer_first_card, s.player_sum, a] += 0.2 * (r - self.R[s.dealer_first_card, s.player_sum, a])
def _learn(self, pi, s, a, r, s_prime, done, alpha, gamma):
if done:
td_target = r
else:
td_target = r + gamma * np.max(self.Q[s_prime.dealer_first_card, s_prime.player_sum, :])
td_error = td_target - self.Q[s.dealer_first_card, s.player_sum, a]
# Prediction
self.Q[s.dealer_first_card, s.player_sum, a] += alpha * td_error
# Improvement
pi[s] = np.argmax(self.Q[s.dealer_first_card, s.player_sum, :])
class DynaQ(Planning):
"""Dyna-Q algorithm"""
def learn(self, epochs=200, n=100, alpha=0.5, gamma=0.9, verbose=False, **kwargs) -> np.ndarray:
"""
Learns the optimal value function.
:param int epochs: The number of epochs to take to learn the value function
:param int n: The planning iterations to use
:param float alpha: The learning rate
:param float gamma: The discount factor
:param bool verbose: Whether to use verbose mode or not
:param dict kwargs: Extra arguments, ignored
:return: The optimal value function
:rtype: np.ndarray
"""
pi = EpsilonGreedyPolicy(seed=24)
for _ in tqdm(range(epochs), disable=not verbose):
done = False
s = self._env.reset()
while not done:
a = pi[s]
s_prime, r, done = self._env.step(a)
# Learning phase
self._learn(pi, s, a, r, s_prime, done, alpha, gamma)
# Planning phase
if n > 0:
self._update_model(s, a, r, s_prime, done)
self._plan(pi, done, n, alpha, gamma)
s = s_prime
# Compute the optimal value function which is simply the value of the best action (last dimension) in each state
return np.max(self.Q, axis=2)
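# Usage sketch (hypothetical call; mirrors the signature documented above):
#   agent = DynaQ()
#   V = agent.learn(epochs=200, n=100, alpha=0.5, gamma=0.9, verbose=True)
#   # V has shape env.state_space and holds max_a Q(s, a) for each state.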
def _plan(self, pi, done, n, alpha, gamma):
# Compute the probabilities of each s,a -> s' transition over all possible transitions from each s,a
transition_probs = self.T / np.sum(self.T, axis=(0, 1, 2))
# This file is part of Frhodo. Copyright © 2020, UChicago Argonne, LLC
# and licensed under BSD-3-Clause. See License.txt in the top-level
# directory for license and copyright information.
from tabulate import tabulate
import matplotlib as mpl
import numpy as np
from scipy import stats
from convert_units import OoM
from plot.base_plot import Base_Plot
from plot.draggable import Draggable
def shape_data(x, y):
return np.transpose(np.vstack((x, y)))
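# e.g. shape_data([1, 2], [3, 4]) -> array([[1, 3], [2, 4]]): one (x, y) row per point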
class Plot(Base_Plot):
def __init__(self, parent, widget, mpl_layout):
super().__init__(parent, widget, mpl_layout)
self.show_unc_shading = False
# Connect Signals
self.canvas.mpl_connect('resize_event', self._resize_event)
parent.num_sim_lines_box.valueChanged.connect(self.set_history_lines)
def info_table_text(self):
parent = self.parent
# TODO: Fix variables when implementing zone 2 and 5 option
shock_zone = parent.display_shock['zone']
if shock_zone == 2:
display_vars = ['T2', 'P2']
elif shock_zone == 5:
display_vars = ['T5', 'P5']
table = [['Shock {:d}'.format(parent.var['shock_choice']), '']]
# This sets the info table to have the units selected in the shock properties window
if not np.isnan([parent.display_shock[key] for key in display_vars]).all():
T_unit = eval('str(parent.' + display_vars[0] + '_units_box.currentText())')
P_unit = eval('str(parent.' + display_vars[1] + '_units_box.currentText())')
T_value = parent.convert_units(parent.display_shock[display_vars[0]], T_unit, 'out')
P_value = parent.convert_units(parent.display_shock[display_vars[1]], P_unit, 'out')
table.append(['T{:.0f} {:s}'.format(shock_zone, T_unit), '{:.2f}'.format(T_value)])
table.append(['P{:.0f} {:s}'.format(shock_zone, P_unit), '{:.2f}'.format(P_value)])
for species, mol_frac in parent.display_shock['thermo_mix'].items():
table.append(['{:s}'.format(species), '{:g}'.format(mol_frac)])
table = tabulate(table).split('\n')[1:-1] # removes header and footer
table_left_justified = []
max_len = len(max(table, key=len))
for line in table:
table_left_justified.append('{:<{max_len}}'.format(line, max_len=max_len))
return '\n'.join(table_left_justified)
def create_canvas(self):
self.ax = []
## Set upper plots ##
self.ax.append(self.fig.add_subplot(4,1,1))
self.ax[0].item = {}
self.ax[0].item['weight_unc_fcn'] = self.ax[0].add_line(mpl.lines.Line2D([],[], c = '#800000', zorder=1))
markers = {'weight_shift': {'marker': 'o', 'markersize': 7},
'weight_k': {'marker': '$'+'\u2194'+'$', 'markersize': 12},
'weight_extrema': {'marker': '$'+u'\u2195'+'$', 'markersize': 12},
'unc_shift': {'marker': 'o', 'markersize': 7},
'unc_k': {'marker': '$'+'\u2194'+'$', 'markersize': 12},
'unc_extrema': {'marker': '$'+u'\u2195'+'$', 'markersize': 12}}
for name, attr in markers.items():
self.ax[0].item[name] = self.ax[0].add_line(mpl.lines.Line2D([],[], marker=attr['marker'],
markersize=attr['markersize'], markerfacecolor='#BF0000', markeredgecolor='None',
linestyle='None', zorder=2))
self.ax[0].item['sim_info_text'] = self.ax[0].text(.98,.92, '', fontsize=10, fontname='DejaVu Sans Mono',
horizontalalignment='right', verticalalignment='top', transform=self.ax[0].transAxes)
self.ax[0].set_ylim(-0.1, 1.1)
self.ax[0].tick_params(labelbottom=False)
self.ax[0].item['title'] = self.ax[0].text(.5,.95,'Weighting', fontsize='large',
horizontalalignment='center', verticalalignment='top', transform=self.ax[0].transAxes)
self.fig.subplots_adjust(left=0.06, bottom=0.065, right=0.98,
top=0.98, hspace=0, wspace=0.12)
## Set lower plots ##
self.ax.append(self.fig.add_subplot(4,1,(2,4), sharex = self.ax[0]))
self.ax[1].item = {}
self.ax[1].item['exp_data'] = self.ax[1].scatter([],[], color='0', facecolors='0',
linewidth=0.5, alpha = 0.85, zorder=2)
self.ax[1].item['sim_data'] = self.ax[1].add_line(mpl.lines.Line2D([],[], c='#0C94FC', zorder=4))
nan_array = [np.nan, np.nan]
self.ax[1].item['unc_shading'] = self.ax[1].fill_between(nan_array, nan_array, nan_array,
color='#0C94FC', alpha=0.2, linewidth=0, zorder=0)
self.ax[1].item['unc_shading'].empty_verts = [path._vertices for path in self.ax[1].item['unc_shading'].get_paths()]
self.ax[1].item['unc_shading'].empty_codes = [path._codes for path in self.ax[1].item['unc_shading'].get_paths()]
self.ax[1].item['history_data'] = []
self.ax[1].item['cutoff_line'] = [self.ax[1].axvline(x=np.nan, ls='--', c='#BF0000', zorder=5),
self.ax[1].axvline(x=np.nan, ls='--', c='#BF0000', zorder=5)]
self.lastRxnNum = None
self.ax[1].text(.5,.98,'Observable', fontsize='large',
horizontalalignment='center', verticalalignment='top', transform=self.ax[1].transAxes)
self.parent.rxn_change_history = []
self.set_history_lines()
# Create colorbar legend
self.cbax = self.fig.add_axes([0.90, 0.575, 0.02, 0.15], zorder=3)
self.cb = mpl.colorbar.ColorbarBase(self.cbax, cmap=mpl.cm.gray,
ticks=[0, 0.5, 1], orientation='vertical')
self.cbax.invert_yaxis()
self.cbax.set_yticklabels(['1', '0.5', '0']) # labels ordered for the inverted axis
self.cb.set_label('Weighting')
# Create canvas from Base
super().create_canvas()
self._set_scale('y', 'abslog', self.ax[1]) # set Signal/SIM y axis to abslog
# Add draggable lines
draggable_items = [[0, 'weight_shift'], [0, 'weight_k'], [0, 'weight_extrema'],
[0, 'unc_shift'], [0, 'unc_k'], [0, 'unc_extrema'],
[1, 'sim_data'], [1, 'cutoff_line']]
for pair in draggable_items:
n, name = pair # n is the axis number, name is the item key
items = self.ax[n].item[name]
if not isinstance(items, list): # check if the type is a list
items = [self.ax[n].item[name]]
for item in items:
update_fcn = lambda x, y, item=item: self.draggable_update_fcn(item, x, y)
press_fcn = lambda x, y, item=item: self.draggable_press_fcn(item, x, y)
release_fcn = lambda item=item: self.draggable_release_fcn(item)
item.draggable = Draggable(self, item, update_fcn, press_fcn, release_fcn)
def set_history_lines(self):
old_num_hist_lines = len(self.ax[1].item['history_data'])
num_hist_lines = self.parent.num_sim_lines_box.value() - 1
numDiff = np.abs(old_num_hist_lines - num_hist_lines)
if old_num_hist_lines > num_hist_lines:
del self.ax[1].item['history_data'][0:numDiff]
elif old_num_hist_lines < num_hist_lines:
for n in range(old_num_hist_lines, old_num_hist_lines+numDiff):
line = mpl.lines.Line2D([],[], zorder=3)
self.ax[1].item['history_data'].append({'line': self.ax[1].add_line(line), 'rxnNum': None})
color = mpl.cm.nipy_spectral(np.linspace(0.05, 0.95, num_hist_lines)[::-1])
for n, item in enumerate(self.ax[1].item['history_data']):
item['line'].set_color(color[n])
if hasattr(self, 'canvas'): # this can be deleted after testing color changes
self._draw_items_artist()
def draggable_press_fcn(self, item, x, y):
x0, xpress, xnew, xpressnew = x['0'], x['press'], x['new'], x['press_new']
y0, ypress, ynew, ypressnew = y['0'], y['press'], y['new'], y['press_new']
xy_data = item.get_xydata()
xy_press = np.array([xpress, ypress])
xy_OoM = 10**OoM(xy_press)
# calculate distance from press and points, don't need sqrt for comparison, divide by OoM for large differences in x/y OoM
distance_cmp = np.sum(np.subtract(xy_data/xy_OoM, xy_press/xy_OoM)**2, axis=1)
item.draggable.nearest_index = np.nanargmin(distance_cmp) # choose closest point to press
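# Hedged illustration of the scaled squared-distance comparison (assumes OoM
# returns the base-10 order of magnitude, roughly floor(log10(|x|))):
#   xy_data = np.array([[1e-6, 150.0], [2e-6, 90.0]])
#   press   = np.array([1.5e-6, 100.0])
#   scale   = 10.0**np.floor(np.log10(np.abs(press)))
#   d2 = np.sum(((xy_data - press)/scale)**2, axis=1)
# Dividing each axis by its own magnitude keeps a 1e-6-scale time axis from
# being swamped by a 1e2-scale signal axis; sqrt is skipped because argmin of
# the squared distance selects the same point.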
def draggable_release_fcn(self, item):
item.draggable.nearest_index = 0 # reset nearest_index
def draggable_update_fcn(self, item, x, y):
parent = self.parent
x = {key: np.array(val)/parent.var['reactor']['t_unit_conv'] for key, val in x.items()} # scale with unit choice
x0, xpress, xnew, xpressnew = x['0'], x['press'], x['new'], x['press_new']
y0, ypress, ynew, ypressnew = y['0'], y['press'], y['new'], y['press_new']
exp_data = parent.display_shock['exp_data']
if item is self.ax[1].item['sim_data']:
time_offset = np.round(xnew[0]/0.01)*0.01
for box in parent.time_offset_box.twin:
box.blockSignals(True)
box.setValue(time_offset)
box.blockSignals(False)
parent.var['time_offset'] = parent.time_offset_box.value()*parent.var['reactor']['t_unit_conv']
parent.tree._copy_expanded_tab_rates() # update rates/time offset autocopy
self.update_sim(parent.SIM.independent_var, parent.SIM.observable)
elif item in self.ax[1].item['cutoff_line']:
for n in range(0,2):
if item is self.ax[1].item['cutoff_line'][n]:
break
t_conv = parent.var['reactor']['t_unit_conv']
t = exp_data[:,0]
t_min = np.min(t)
cutoff_perc = (xnew*t_conv- t_min)/(np.max(t) - t_min)
parent.exp_unc.boxes['unc_cutoff'][n].setValue(cutoff_perc*100)
elif item is self.ax[0].item['weight_shift'] or item is self.ax[0].item['unc_shift']:
if item is self.ax[0].item['weight_shift']:
plot_type = 'weight'
box_type = plot_type
elif item is self.ax[0].item['unc_shift']:
plot_type = 'unc'
box_type = 'exp_unc'
t_conv = parent.var['reactor']['t_unit_conv']
n = item.draggable.nearest_index
# shift must be within the experiment
xnew = (xnew[n]*t_conv - exp_data[0,0])/(exp_data[-1,0] - exp_data[0,0])*100
if n == 0:
if xnew < 0.0:
xnew = 0.0
elif xnew > parent.display_shock[f'{plot_type}_shift'][1]:
xnew = parent.display_shock[f'{plot_type}_shift'][1]
elif n == 1:
if xnew < parent.display_shock[f'{plot_type}_shift'][0]:
xnew = parent.display_shock[f'{plot_type}_shift'][0]
elif xnew > 100:
xnew = 100
eval(f'parent.{box_type}.boxes["{plot_type}_shift"][n].setValue(xnew)')
elif item is self.ax[0].item['weight_k'] or item is self.ax[0].item['unc_k']: # save n on press, erase on release
if item is self.ax[0].item['weight_k']:
plot_type = 'weight'
box_type = plot_type
elif item is self.ax[0].item['unc_k']:
plot_type = 'unc'
box_type = 'exp_unc'
xy_data = item.get_xydata()
n = item.draggable.nearest_index
i = n // 2
shift = parent.display_shock[f'{plot_type}_shift'][i]
shift = shift/100*(exp_data[-1,0] - exp_data[0,0]) + exp_data[0,0]
shift /= parent.var['reactor']['t_unit_conv']
# Calculate new sigma, shift - sigma or sigma - shift based on which point is selected
sigma_new = -((-1)**(n))*(xnew[n] - shift)
if sigma_new < 0: # Sigma must be greater than 0
sigma_new = 0
eval(f'parent.{box_type}.boxes["{plot_type}_k"][i].setValue(sigma_new)')
elif item is self.ax[0].item['weight_extrema'] or item is self.ax[0].item['unc_extrema']: # TODO: FIX SCALE NOT CHANGING WHEN ALTERING THROUGH PLOT
xy_data = item.get_xydata()
n = item.draggable.nearest_index
if item is self.ax[0].item['weight_extrema']:
plot_type = 'weight'
box_type = plot_type
if n != 1:
weight_type = 'weight_min'
i = n // 2
else:
weight_type = 'weight_max'
i = 0
elif item is self.ax[0].item['unc_extrema']:
plot_type = 'unc'
box_type = 'exp_unc'
if n != 1:
weight_type = 'unc_max'
i = n // 2
else:
weight_type = 'unc_min'
i = 0
box = eval(f'parent.{box_type}.boxes["{weight_type}"][i]')
extrema_new = ynew[n]
if self.parent.exp_unc.unc_type == '%':
GUI_max = parent.display_shock[weight_type][i]/100
extrema_new = ynew[n] + GUI_max - xy_data[n][1] # account for fcn not reaching maximum
# Must be greater than 0 and less than 0.99
if extrema_new < box.minimum():
extrema_new = box.minimum() # Let the GUI decide low end
elif extrema_new > box.maximum():
extrema_new = box.maximum()
box.setValue(extrema_new*100)
else:
GUI_max = parent.display_shock[weight_type][i]
extrema_new = ynew[n] + GUI_max - xy_data[n][1] # account for fcn not reaching maximum
box.setValue(extrema_new)
# Update plot if data exists
if exp_data.size > 0:
parent.update_user_settings()
self.update()
def _resize_event(self, event=None):
canvas_width = self.canvas.size().width()
left = -7.6E-08*canvas_width**2 + 2.2E-04*canvas_width + 7.55E-01 # Might be better to adjust by pixels
self.cbax.set_position([left, 0.575, 0.02, 0.15])
def _clear_event(self, event=None): # unused
self.fig.clear()
def update(self, update_lim=False):
parent = self.parent
if parent.display_shock['exp_data'].size == 0: return
t = parent.display_shock['exp_data'][:,0]
data = parent.display_shock['exp_data'][:,1]
# Update upper plot
obj_fcn_type = parent.obj_fcn_type_box.currentText()
if obj_fcn_type == 'Residual':
self.update_weight_plot()
else:
self.update_uncertainty_plot()
self.update_uncertainty_shading()
# Update lower plot
weights = parent.display_shock['weights']
self.ax[1].item['exp_data'].set_offsets(shape_data(t, data))
self.ax[1].item['exp_data'].set_facecolor(np.char.mod('%f', 1-weights))
self.update_info_text()
if update_lim:
self.update_xylim(self.ax[1])
def update_weight_plot(self):
parent = self.parent
if parent.display_shock['exp_data'].size == 0: return
t = parent.display_shock['exp_data'][:,0]
shift = np.array(parent.display_shock['weight_shift'])/100*(t[-1] - t[0]) + t[0]
inv_growth_rate = np.array(parent.display_shock['weight_k'])*self.parent.var['reactor']['t_unit_conv']
weight_fcn = parent.series.weights
weights = parent.display_shock['weights'] = weight_fcn(t)
self.ax[0].item['weight_unc_fcn'].set_xdata(t)
self.ax[0].item['weight_unc_fcn'].set_ydata(weights)
# calculate mu markers
mu = shift
f_mu = weight_fcn(mu, calcIntegral=False)
# calculate extrema markers
t_range = np.max(t) - np.min(t)
t_extrema = np.array([np.min(t), np.mean(mu), np.max(t)]) + np.array([0.0125, 0, -0.025])*t_range # put arrow at 95% of x data
# calculate sigma markers
ones_shape = (np.shape(f_mu)[0], 2)
sigma = np.ones(ones_shape)*mu + (np.ones(ones_shape)*np.array([-1, 1])).T*inv_growth_rate
sigma = sigma.T # sort may be unnecessary
f_sigma = np.reshape(weight_fcn(sigma.flatten(), calcIntegral=False), ones_shape)
for i in np.argwhere(inv_growth_rate == 0.0):
f = weight_fcn(np.array([(1.0-1E-3), (1.0+1E-3)])*mu[i], calcIntegral=False)
f_mu[i] = np.mean(f)
perc = 0.1824
f_sigma[i] = [(1-perc)*f[0] + perc*f[1], perc*f[0] + (1-perc)*f[1]]
sigma = sigma.flatten()
f_sigma = f_sigma.flatten()
if sigma[1] >= 0.80*t_extrema[1] + 0.20*mu[0]: # hide sigma symbols if too close to center extrema
sigma[1] = np.nan
if sigma[2] <= 0.75*t_extrema[1] + 0.25*mu[1]:
sigma[2] = np.nan
# Set markers
self.ax[0].item['weight_shift'].set_xdata(mu)
self.ax[0].item['weight_shift'].set_ydata(f_mu)
self.ax[0].item['weight_k'].set_xdata(sigma.flatten())
self.ax[0].item['weight_k'].set_ydata(f_sigma.flatten())
self.ax[0].item['weight_extrema'].set_xdata(t_extrema)
self.ax[0].item['weight_extrema'].set_ydata(weight_fcn(t_extrema, calcIntegral=False))
def update_uncertainty_plot(self):
parent = self.parent
if parent.display_shock['exp_data'].size == 0: return
t = parent.display_shock['exp_data'][:,0]
shift = np.array(parent.display_shock['unc_shift'])/100*(t[-1] - t[0]) + t[0]
inv_growth_rate = np.array(parent.display_shock['unc_k'])*self.parent.var['reactor']['t_unit_conv']
unc_fcn = parent.series.uncertainties
uncertainties = unc_fcn(t, calcWeights=True)
parent.display_shock['uncertainties'] = uncertainties
self.ax[0].item['weight_unc_fcn'].set_xdata(t)
self.ax[0].item['weight_unc_fcn'].set_ydata(uncertainties)
# calculate mu markers
mu = shift
f_mu = unc_fcn(mu)
# calculate extrema markers
t_min = np.min(t)
t_max = np.max(t)
t_range = t_max - t_min
t_extrema = np.array([t_min, np.mean(mu), t_max]) + np.array([0.0125, 0, -0.025])*t_range # put arrow at 95% of x data
# calculate sigma markers
ones_shape = (np.shape(f_mu)[0], 2)
sigma = np.ones(ones_shape)*mu + (np.ones(ones_shape)*np.array([-1, 1])).T*inv_growth_rate
r"""
Pore-scale models related to topology of the network.
"""
from numpy.linalg import norm as _norm
import numpy as _np
def coordination_number(target):
r"""
Find the number of neighbors for each pore
"""
network = target.network
N = network.num_neighbors(pores=network.Ps, flatten=False)
return N
def pore_to_pore_distance(target):
r"""
Find the center to center distance between each pair of pores
"""
network = target.project.network
cn = network['throat.conns']
C1 = network['pore.coords'][cn[:, 0]]
C2 = network['pore.coords'][cn[:, 1]]
values = _norm(C1 - C2, axis=1)
return values
def distance_to_nearest_neighbor(target):
r"""
Find the distance between each pore and its closest topological neighbor
"""
network = target.project.network
cn = network['throat.conns']
C1 = network['pore.coords'][cn[:, 0]]
C2 = network['pore.coords'][cn[:, 1]]
D = _norm(C1 - C2, axis=1)
im = network.create_incidence_matrix()
values = _np.ones((network.Np, ))*_np.inf
_np.minimum.at(values, im.row, D[im.col])
return _np.array(values)
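# Hedged sketch (toy data) of the unbuffered scatter-minimum used above:
# _np.minimum.at applies every (index, value) pair, so repeated pore indices
# in im.row all take effect, unlike plain fancy-indexing assignment:
#   vals = _np.full(3, _np.inf)
#   _np.minimum.at(vals, [0, 0, 2], [3.0, 1.0, 5.0])
#   # vals -> array([1., inf, 5.])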
def distance_to_furthest_neighbor(target):
r"""
Find the distance between each pore and its furthest topological neighbor
"""
network = target.project.network
throats = network.map_throats(throats=target.Ts, origin=target)
cn = network['throat.conns'][throats]
C1 = network['pore.coords'][cn[:, 0]]
C2 = network['pore.coords'][cn[:, 1]]
D = _norm(C1 - C2, axis=1)
im = network.create_incidence_matrix()
values = _np.zeros((network.Np, ))
_np.maximum.at(values, im.row, D[im.col])
return _np.array(values)
def cluster_number(target):
r"""
Assign a cluster number to each pore
"""
net = target.network
from scipy.sparse import csgraph as csg
am = net.create_adjacency_matrix(fmt='coo', triu=True)
N, Cs = csg.connected_components(am, directed=False)
return Cs
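# Hedged sketch (toy graph) of scipy's connected-components labelling:
#   from scipy.sparse import coo_matrix
#   from scipy.sparse import csgraph as csg
#   # edges 0-1 and 2-3, upper-triangular as in create_adjacency_matrix(triu=True)
#   am = coo_matrix(([1, 1], ([0, 2], [1, 3])), shape=(4, 4))
#   N, Cs = csg.connected_components(am, directed=False)
#   # N -> 2, Cs -> array([0, 0, 1, 1])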
def cluster_size(target, cluster=None):
r"""
Find the size of the cluster to which each pore belongs
Parameters
----------
network : dict
The OpenPNM network object
cluster : str, optional
Dict key pointing to the array containing the cluster number of each
pore. If not provided then it will be calculated.
Returns
-------
cluster_size : ndarray
An Np-long array containing the size of the cluster to which each pore
belongs
"""
net = target.network
if cluster is None:
from scipy.sparse import csgraph as csg
am = net.create_adjacency_matrix(fmt='coo', triu=True)
N, cluster_num = csg.connected_components(am, directed=False)
else:
cluster_num = net[cluster]
Cs, ind, N = _np.unique(cluster_num, return_inverse=True, return_counts=True)
values = N[ind]
return values
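# Hedged sketch of the unique/inverse/counts idiom that maps each pore to the
# size of its own cluster:
#   cluster_num = _np.array([0, 0, 1, 0, 1])
#   Cs, ind, N = _np.unique(cluster_num, return_inverse=True, return_counts=True)
#   # N[ind] -> array([3, 3, 2, 3, 2]); element i gets the count of its label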
def isolated_pores(target):
r"""
Find which pores, if any, are not connected to a throat
"""
net = target.network
values = _np.ones(net.Np, dtype=bool)
hits = _np.unique(net.conns)
values[hits] = False
return values
def reversed_throats(target):
r"""
Find any throat connections that are pointing from j -> i where j > i
"""
net = target.network
hits = net.conns[:, 0] > net.conns[:, 1]
return hits
def looped_throats(target):
r"""
Find any throats that are connected to the same pore on both ends
"""
net = target.network
hits = net.conns[:, 0] == net.conns[:, 1]
return hits
def headless_throats(target):
r"""
Find any throats that point to a non-existent pore
"""
net = target.network
hits = _np.any(net.conns > (net.Np - 1), axis=1)
return hits
def duplicate_throats(target):
r"""
Find repeat occurrences of throat connections
"""
net = target.network
conns = net.conns
iconns = conns[:, 0] + 1j*conns[:, 1]
hits, inds = _np.unique(iconns, return_index=True)
values = _np.ones(net.Nt, dtype=bool)
values[inds] = False
return values
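# Hedged sketch of the complex-number edge encoding: packing (i, j) into
# i + 1j*j makes each connection a single hashable value for _np.unique, and
# marking first occurrences False leaves only repeats flagged:
#   conns = _np.array([[0, 1], [2, 3], [0, 1]])
#   iconns = conns[:, 0] + 1j*conns[:, 1]
#   hits, first = _np.unique(iconns, return_index=True)
#   dup = _np.ones(len(conns), dtype=bool); dup[first] = False
#   # dup -> array([False, False,  True]); only the repeat of (0, 1) flagged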
def distance_to_nearest_pore(target):
r"""
Find distance to and index of nearest pore even if not topologically
connected
"""
import scipy.spatial as sptl
net = target.network
coords = net.coords
tree = sptl.KDTree(coords)
ds, ids = tree.query(coords, k=2)
values = ds[:, 1]
return values
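# Hedged note on the k=2 query: each point's nearest KDTree hit is itself at
# distance 0, so column 1 holds the distance to the nearest *other* pore:
#   tree = sptl.KDTree(_np.array([[0., 0.], [1., 0.], [5., 0.]]))
#   ds, ids = tree.query(_np.array([[0., 0.], [1., 0.], [5., 0.]]), k=2)
#   # ds[:, 1] -> array([1., 1., 4.])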
def count_coincident_pores(target, thresh=1e-6):
r"""
Count number of pores that are spatially coincident with other pores
Parameters
----------
network : dict
The OpenPNM network object
thresh : float
The distance below which two pores are considered spatially coincident
Returns
-------
count : ndarray
A numpy array of Np length containing the number coincident pores
"""
# This needs to be a bit complicated because it cannot be assumed
# the coincident pores are topologically connected
import scipy.spatial as sptl
net = target.network
coords = net.coords
tree = sptl.KDTree(coords)
hits = tree.query_pairs(r=thresh)
arr = _np.array(list(hits)).flatten()
v, n = _np.unique(arr, return_counts=True)
values = _np.zeros(net.Np, dtype=int)
values[v] = n
return values
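# Hedged sketch of query_pairs on toy coordinates: it returns the set of
# index pairs closer than r, from which per-pore coincidence counts follow:
#   coords = _np.array([[0., 0., 0.], [0., 0., 0.], [1., 1., 1.]])
#   tree = sptl.KDTree(coords)
#   tree.query_pairs(r=1e-6)   # -> {(0, 1)}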
# -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
from keras import backend as K
from keras.models import load_model
from keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
from keras.utils import multi_gpu_model
from tracker.centroidtracker import CentroidTracker
from tracker.trackableobject import TrackableObject
import cv2
class YOLO(object):
_defaults = {
"model_path": 'model_data/yolo-tiny.h5',
"anchors_path": 'model_data/tiny_yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
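# Hedged sketch: the anchors file is one comma-separated line, reshaped into
# (width, height) rows, e.g. for the first two tiny-YOLO default anchors:
#   np.array([10., 14., 23., 27.]).reshape(-1, 2)
#   # -> [[10., 14.], [23., 27.]]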
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
# print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
out_boxes2 = []
out_scores2 = []
out_classes2 = []
for i, c in enumerate(out_classes):
if c == 2:
out_boxes2.append(out_boxes[i])
out_scores2.append(out_scores[i])
out_classes2.append(out_classes[i])
# font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
# size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
if len(out_classes2) != 0:
for i, c in reversed(list(enumerate(out_classes2))):
predicted_class = self.class_names[c]
box = out_boxes2[i]
score = out_scores2[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
# label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
"""
After training with p1_training_inverse_model.py, run this for p1
Then run p1_make_vids.py to generate the videos
"""
import torch
import logging
import torch.nn as nn
from dataset import ObjPushDataset
from model_learners import InverseModel
from torch.utils.data import Dataset, DataLoader
from push_env import PushingEnv
import numpy as np
import pandas as pd
device = "cuda" if torch.cuda.is_available() else "cpu"
logger = logging.getLogger(__name__)
logging.basicConfig()
logger.setLevel(logging.INFO)
##### HYPERPARAMETERS ######
start_state_dims = 2
next_state_dims = 2
action_dims = 4
nn_layer_1_size = 64
nn_layer_2_size = 32
criterion = nn.MSELoss()
lr = 8e-4
seed = 0
num_epochs = 140
bsize = 512
num_pushes = 10
############################
def main():
logger.info("Instantiating model and importing weights")
# instantiate forward model and import pretrained weights
inv_model = InverseModel(start_state_dims=start_state_dims,
next_state_dims=next_state_dims,
action_dims=action_dims,
latent_var_1=nn_layer_1_size,
latent_var_2=nn_layer_2_size,
criterion=criterion,
lr=lr,
seed=seed)
inv_model.load_state_dict(torch.load("invmodel_learned_params.pt"))
# Load in data
logger.info("Importing test data")
test_dir = 'push_dataset/test'
# only want 1 push each time, so set batch_size to 1
test_loader = DataLoader(ObjPushDataset(test_dir), batch_size=1, shuffle=True)
env = PushingEnv()
errors = []
true_pushes = []
pred_pushes = []
logger.info("Running loop")
for i, (start_state, goal_state, true_action) in enumerate(test_loader):
logger.info(f'Iteration #{i}')
# Convert inputs to floats
start_state = start_state.float()
goal_state = goal_state.float()
true_action = true_action.float()
# Use inverse model to predict action given the start and goal states
combined_input = torch.cat((start_state, goal_state), dim=1)
pred_action = inv_model(combined_input)
# Switch output from tensors to numpy for easy use later
start_state = start_state.data.numpy()[0]
goal_state = goal_state.data.numpy()[0]
true_action = true_action.data.numpy()[0]
pred_action = pred_action.data.numpy()[0]
start_x, start_y, end_x, end_y = pred_action
_, end_state = env.execute_push(start_x, start_y, end_x, end_y)
end_state = np.array(end_state)
# Calculate errors
action_error = np.linalg.norm(true_action - pred_action)
state_error = np.linalg.norm(goal_state - end_state)
import comet_ml, json
import numpy as np
import torch
from torch import optim
from misc import clear_gradients
from lib import create_agent
from util.env_util import create_env
from util.plot_util import load_checkpoint
from lib.distributions import kl_divergence
from local_vars import PROJECT_NAME, WORKSPACE, LOADING_API_KEY, LOGGING_API_KEY
alim = [-1, 1]
aint = 0.01
BATCH_SIZE = 256
N_ACTION_SAMPLES = 100
# Reacher Experiments:
# direct: 48edb0b9aca847c09c6893793c982884
# iterative: 58ec5bc5273044e59ae30a969c3d7de4
def estimate_opt_landscape(exp_key, states=None, ckpt_timestep=None, device_id=None):
"""
Estimates the optimization landscape for a checkpointed agent. Also gets the
policy estimates during inference optimization.
Args:
exp_key (str): the comet experiment ID
state (list of torch.Tensor, optional): the state(s) used for estimation
ckpt_timestep (int, optional): the checkpoint for estimation
device_id (int, optional): the GPU ID
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=exp_key)
# create the environment (just to create agent)
param_summary = experiment.get_parameters_summary()
env_name = [a for a in param_summary if a['name'] == 'env'][0]['valueCurrent']
env = create_env(env_name)
# create the agent
asset_list = experiment.get_asset_list()
agent_config_asset_list = [a for a in asset_list if 'agent_args' in a['fileName']]
agent_args = None
if len(agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
agent_args = experiment.get_asset(agent_config_asset_list[0]['assetId'])
agent_args = json.loads(agent_args)
agent_args = agent_args if 'opt_type' in agent_args['inference_optimizer_args'] else None
agent = create_agent(env, agent_args=agent_args, device_id=device_id)[0]
# load the checkpoint
load_checkpoint(agent, exp_key, ckpt_timestep)
if states is None:
# load a random state from the most recently collected episode
state_asset = None
if ckpt_timestep is not None:
# load the corresponding episode if it is present
state_asset_list = [a for a in asset_list if 'episode_step_' + str(ckpt_timestep) + '_state' in a['fileName']]
if len(state_asset_list) > 0:
state_asset = state_asset_list[0]
if state_asset is None:
# get most recently collected episode
asset_times = [asset['createdAt'] for asset in asset_list if 'state' in asset['fileName']]
state_asset = [a for a in asset_list if a['createdAt'] == max(asset_times)][0]
episode_states = experiment.get_asset(state_asset['assetId'])
episode_states = json.loads(episode_states)
# state_timestep = [np.random.randint(len(episode_states)) for _ in range(100)]
state_timesteps = range(100)
states = [torch.from_numpy(np.array(episode_states[state_timestep])).view(1, -1).type(torch.FloatTensor) for state_timestep in state_timesteps]
# n_actions = int(((alim[1] - alim[0]) / aint) ** n_action_dims)
n_action_dims = env.action_space.shape[0]
a = np.arange(alim[0], alim[1], aint)
a_args = n_action_dims * [a]
a_coords = np.meshgrid(*a_args)
stacked_action_means = np.stack([a_coord.reshape(-1) for a_coord in a_coords]).T
n_batches = len(stacked_action_means) // BATCH_SIZE + 1
n_samples = agent.n_action_samples if N_ACTION_SAMPLES is None else N_ACTION_SAMPLES
q_estimates_list = []
log_ratios_list = []
approx_posts_list = []
for state_ind, state in enumerate(states):
if state_ind % 5 == 0:
print('Processing state ' + str(state_ind+1) + ' of ' + str(len(states)) + '.')
q_estimates = np.zeros(len(stacked_action_means))
log_ratios = np.zeros(len(stacked_action_means))
# perform inference on the state
batch_expanded_state = state.repeat(BATCH_SIZE, 1)
sample_expanded_state = batch_expanded_state.repeat(n_samples, 1)
agent.reset(batch_size=BATCH_SIZE); agent.eval()
agent.act(batch_expanded_state)
approx_posts = agent.inference_optimizer.dist_params
# loop over actions, get value estimates
for batch_ind in range(n_batches):
if batch_ind % 25 == 0:
print(' Processing batch ' + str(batch_ind+1) + ' of ' + str(n_batches) + '.')
# get a batch of actions
start_ind = batch_ind * BATCH_SIZE
end_ind = min((batch_ind + 1) * BATCH_SIZE, len(stacked_action_means))
action_mean_batch = stacked_action_means[start_ind:end_ind]
# evaluate the value estimate of the action in the state
if action_mean_batch.shape[0] != BATCH_SIZE:
# fill out the rest of the batch with zeros
temp_action_mean_batch = np.zeros((BATCH_SIZE, n_action_dims))
temp_action_mean_batch[:action_mean_batch.shape[0]] = action_mean_batch
action_mean_batch = temp_action_mean_batch
action_mean_batch = torch.from_numpy(action_mean_batch).type(torch.FloatTensor)
# reset approx post, sample actions
agent.reset(batch_size=BATCH_SIZE); agent.eval()
agent.approx_post.reset(batch_size=BATCH_SIZE, dist_params={'loc': action_mean_batch.clone().requires_grad_()})
# agent.inference_optimizer(agent, batch_expanded_state)
action_batch = agent.approx_post.sample(n_samples)
q_values = agent.q_value_estimator(agent, sample_expanded_state, action_batch)
q_values = q_values.view(n_samples, -1, 1).mean(dim=0)
kls = kl_divergence(agent.approx_post, agent.prior, n_samples=n_samples, sample=action_batch).sum(dim=1, keepdim=True)
q_estimates[start_ind:end_ind] = q_values[:end_ind-start_ind].view(-1).detach().cpu().numpy()
log_ratios[start_ind:end_ind] = kls[:end_ind-start_ind].view(-1).detach().cpu().numpy()
q_estimates = q_estimates.reshape(n_action_dims * [int((alim[1] - alim[0]) / aint)])
log_ratios = log_ratios.reshape(n_action_dims * [int((alim[1] - alim[0]) / aint)])
q_estimates_list.append(q_estimates)
log_ratios_list.append(log_ratios)
approx_posts_list.append(approx_posts)
return {'q_estimates': q_estimates_list,
'log_ratios': log_ratios_list,
'alpha_pi': agent.alphas['pi'].detach().cpu().numpy(),
'approx_posts': approx_posts_list}
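# Hedged sketch of the dense action grid built above: meshgrid expands the
# per-dimension coordinates and the stack/reshape flattens them into one row
# per grid point:
#   a = np.arange(-1, 1, 0.5)                       # 4 values per dim
#   coords = np.meshgrid(a, a)                      # two (4, 4) arrays
#   grid = np.stack([c.reshape(-1) for c in coords]).T
#   # grid.shape -> (16, 2); rows enumerate every (a1, a2) combination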
def vis_inference(exp_key, action_indices, state_ind=0):
"""
Plots a 2D analysis of direct inference, comparing with gradient ascent.
Args:
exp_key (str): the experiment key
state_ind (int): state index to plot
action_indices (list): two action indices to vary
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=exp_key)
# create the environment
param_summary = experiment.get_parameters_summary()
env_name = [a for a in param_summary if a['name'] == 'env'][0]['valueCurrent']
env = create_env(env_name)
# create the agent
asset_list = experiment.get_asset_list()
agent_config_asset_list = [a for a in asset_list if 'agent_args' in a['fileName']]
agent_args = None
if len(agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
agent_args = experiment.get_asset(agent_config_asset_list[0]['assetId'])
agent_args = json.loads(agent_args)
agent_args = agent_args if 'opt_type' in agent_args['inference_optimizer_args'] else None
agent = create_agent(env, agent_args=agent_args)[0]
# load the checkpoint
load_checkpoint(agent, exp_key)
# load the state from the most recently collected episode
asset_times = [asset['createdAt'] for asset in asset_list if 'state' in asset['fileName']]
state_asset = [a for a in asset_list if a['createdAt'] == max(asset_times)][0]
episode_states = json.loads(experiment.get_asset(state_asset['assetId']))
state = torch.from_numpy(np.array(episode_states[state_ind])).view(1, -1).type(torch.FloatTensor)
print('STATE: ')
print(state)
# perform inference, get the direct approx post
agent.reset(); agent.eval()
agent.act(state)
loc = agent.approx_post.dist.loc.detach().clone(); scale = agent.approx_post.dist.scale.detach().clone()
direct_approx_post = {'loc': loc.clone().cpu().numpy(),
'scale': scale.clone().cpu().numpy()}
print('DIRECT APPROX. POST.: ')
print(direct_approx_post)
print('Performing gradient-based optimization...')
LR = 0.1
agent.n_action_samples = 100
# update the approx post using gradient descent on the 2 dims of the mean
sgd_objectives = [np.inf]
sgd_locs = [agent.approx_post.dist.loc.detach().cpu().numpy()]
# dist_params = {k: v.data.requires_grad_() for k, v in agent.approx_post.get_dist_params().items()}
sgd_loc = agent.approx_post.dist.loc.clone().detach().requires_grad_()
sgd_scale = agent.approx_post.dist.scale.clone().detach()
dist_params = {'loc': sgd_loc, 'scale': sgd_scale}
agent.approx_post.reset(dist_params=dist_params)
# dist_param_list = [param for _, param in dist_params.items()]
# just perform SGD on the mean
dist_param_list = [sgd_loc]
# optimizer = optim.SGD(dist_param_list, lr=LR, momentum=0.9)
optimizer = optim.Adam(dist_param_list, lr=LR)
optimizer.zero_grad()
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
sgd_objectives.append(-obj.detach())
for _ in range(250):
obj.sum().backward(retain_graph=True)
for a_dim in range(agent.approx_post.dist.loc.shape[1]):
if a_dim not in action_indices:
agent.approx_post.dist.loc.grad[:, a_dim] = 0.
optimizer.step()
optimizer.zero_grad()
agent.approx_post._sample = None
# reset the non-optimized dimensions
# for a_dim in range(agent.approx_post.dist.loc.shape[1]):
# if a_dim not in action_indices:
# agent.approx_post.dist.loc[:, a_dim] = loc[:, a_dim]
# agent.approx_post.dist.scale = scale
sgd_locs.append(agent.approx_post.dist.loc.clone().detach().cpu().numpy())
# reset the optimizer, pretty hacky...
# sgd_loc = agent.approx_post.dist.loc.clone().detach().requires_grad_()
# sgd_scale = agent.approx_post.dist.scale.clone().detach()
# dist_params = {'loc': sgd_loc, 'scale': sgd_scale}
# agent.approx_post.reset(dist_params=dist_params)
# # dist_param_list = [param for _, param in dist_params.items()]
# # just perform SGD on the mean
# dist_param_list = [sgd_loc]
# optimizer = optim.Adam(dist_param_list, lr=LR)
# optimizer.zero_grad()
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
sgd_objectives.append(-obj.detach())
clear_gradients(agent.generative_parameters())
agent.n_action_samples = 10
print('Done.')
print('Estimating objectives...')
agent.n_action_samples = 10
# get all action means
a = np.arange(alim[0], alim[1], aint)
a_args = 2 * [a]
a_coords = np.meshgrid(*a_args)
stacked_action_means = np.stack([a_coord.reshape(-1) for a_coord in a_coords]).T
n_batches = len(stacked_action_means) // BATCH_SIZE + 1
n_samples = agent.n_action_samples
batch_expanded_state = state.repeat(BATCH_SIZE, 1)
batch_expanded_loc = loc.repeat(BATCH_SIZE, 1)
batch_expanded_scale = scale.repeat(BATCH_SIZE, 1)
objectives = np.zeros((len(stacked_action_means), 1))
# estimate the objective at all action means
for batch_ind in range(n_batches):
if batch_ind % 25 == 0:
print(' Processing batch ' + str(batch_ind+1) + ' of ' + str(n_batches) + '.')
# get a batch of actions
start_ind = batch_ind * BATCH_SIZE
end_ind = min((batch_ind + 1) * BATCH_SIZE, len(stacked_action_means))
action_mean_batch = stacked_action_means[start_ind:end_ind]
if action_mean_batch.shape[0] != BATCH_SIZE:
# fill out the rest of the batch with zeros if at the end
temp_action_mean_batch = np.zeros((BATCH_SIZE, 2))
temp_action_mean_batch[:action_mean_batch.shape[0]] = action_mean_batch
action_mean_batch = temp_action_mean_batch
action_mean_batch = torch.from_numpy(np.arctanh(action_mean_batch + 1e-6)).type(torch.FloatTensor)
# reset approx post, sample actions
agent.reset(batch_size=BATCH_SIZE); agent.eval()
loc_batch = batch_expanded_loc
loc_batch[:, action_indices[0]] = action_mean_batch[:, 0]
loc_batch[:, action_indices[1]] = action_mean_batch[:, 1]
scale_batch = batch_expanded_scale
agent.approx_post.reset(batch_size=BATCH_SIZE, dist_params={'loc': loc_batch.clone().requires_grad_(),
'scale': scale_batch.clone()})
action_batch = agent.approx_post.sample(n_samples)
# evaluate the value estimate of the action in the state
objective = agent.estimate_objective(batch_expanded_state, action_batch).view(n_samples, -1, 1).mean(dim=0).detach().cpu().numpy()
objectives[start_ind:end_ind] = objective[:end_ind-start_ind]
objectives = objectives.reshape(2 * [int((alim[1] - alim[0]) / aint)])
agent.n_action_samples = 10
print('Done.')
return {'objectives': objectives,
'stacked_action_means': stacked_action_means,
'direct_approx_post': direct_approx_post,
'action_indices': action_indices,
'sgd_approx_post_means': sgd_locs,
'sgd_objectives': sgd_objectives}
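# Hedged note on the masked-gradient trick used above: zeroing selected rows
# of loc.grad before optimizer.step() freezes those action dimensions. With
# Adam this holds exactly only if the masked entries are zeroed on *every*
# step, since nonzero moment estimates would otherwise keep moving them:
#   p = torch.zeros(1, 4, requires_grad=True)
#   opt = optim.Adam([p], lr=0.1)
#   loss = (p - 1.0).pow(2).sum()
#   loss.backward()
#   p.grad[:, 2:] = 0.       # freeze dims 2 and 3
#   opt.step()               # only dims 0 and 1 move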
# from util.analysis import vis_it_inference
# import numpy as np
# import pickle
#
# state = [0.7380273938179016,
# 0.9774200916290283,
# 0.014780346304178238,
# -0.053389377892017365,
# -0.20391426980495453,
# 0.09159323573112488,
# 1.2636744976043701,
# 0.49291884899139404,
# -0.8514286279678345,
# 0.027635907754302025,
# -0.523140549659729,
# -0.26849716901779175,
# 0.7275161147117615,
# 1.7905492782592773,
# 1.1246192455291748,
# 1.2539386749267578,
# -0.29752910137176514,
# 0.5522995591163635,
# -1.4331533908843994,
# 1.4389076232910156,
# 1.911720633506775,
# -1.2782995700836182,
# -3.6260697841644287,
# -2.3452537059783936,
# -0.010221259668469429,
# 3.8292510509490967,
# -1.393014907836914]
#
# np_state = np.array(state)
def vis_it_inference(exp_key, action_indices, state_ind=0, state=None):
"""
Plots a 2D analysis of iterative inference.
Args:
exp_key (str): the experiment key
action_indices (list): two action indices to vary
state_ind (int): state index to plot
state (np.array): the state to evaluate
"""
# load the experiment
comet_api = comet_ml.API(api_key=LOADING_API_KEY)
experiment = comet_api.get_experiment(project_name=PROJECT_NAME,
workspace=WORKSPACE,
experiment=exp_key)
# create the environment
param_summary = experiment.get_parameters_summary()
env_name = [a for a in param_summary if a['name'] == 'env'][0]['valueCurrent']
env = create_env(env_name)
# create the agent
asset_list = experiment.get_asset_list()
agent_config_asset_list = [a for a in asset_list if 'agent_args' in a['fileName']]
agent_args = None
if len(agent_config_asset_list) > 0:
# if we've saved the agent config dict, load it
agent_args = experiment.get_asset(agent_config_asset_list[0]['assetId'])
agent_args = json.loads(agent_args)
agent_args = agent_args if 'opt_type' in agent_args['inference_optimizer_args'] else None
agent = create_agent(env, agent_args=agent_args)[0]
# load the checkpoint
load_checkpoint(agent, exp_key)
if state is None:
# load the state from the most recently collected episode
asset_times = [asset['createdAt'] for asset in asset_list if 'state' in asset['fileName']]
state_asset = [a for a in asset_list if a['createdAt'] == max(asset_times)][0]
episode_states = json.loads(experiment.get_asset(state_asset['assetId']))
state = torch.from_numpy(np.array(episode_states[state_ind])).view(1, -1).type(torch.FloatTensor)
else:
state = torch.from_numpy(state).view(1,-1).type(torch.FloatTensor)
print('STATE: ')
print(state)
# perform iterative inference, get the approx post
agent.reset(); agent.eval()
agent.act(state)
loc = agent.approx_post.dist.loc.detach().clone(); scale = agent.approx_post.dist.scale.detach().clone()
it_approx_post = {'loc': loc.clone().cpu().numpy(),
'scale': scale.clone().cpu().numpy()}
print('ITERATIVE APPROX. POST.: ')
print(it_approx_post)
print('Performing iterative inference...')
# only optimize two of the means
agent.n_action_samples = 10
total_it_locs = []
total_it_objs = []
for inf_seed in range(10):
agent.reset(); agent.eval()
# random Gaussian init for the mean
agent.approx_post.reset(dist_params={'loc': 0.3*agent.approx_post.dist.loc.clone().detach().normal_(),
'scale': agent.approx_post.dist.scale.clone().detach()})
iterative_locs = []
iterative_objectives = []
# for inf_it in range(agent.inference_optimizer.n_inf_iters):
if False:
# gradient-based
LR = 0.05
sgd_loc = agent.approx_post.dist.loc.clone().detach().requires_grad_()
sgd_scale = agent.approx_post.dist.scale.clone().detach()
dist_params = {'loc': sgd_loc, 'scale': sgd_scale}
agent.approx_post.reset(dist_params=dist_params)
# only perform SGD on the mean
dist_param_list = [sgd_loc]
optimizer = optim.Adam(dist_param_list, lr=LR)
optimizer.zero_grad()
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
iterative_objectives.append(-obj.detach())
iterative_locs.append(agent.approx_post.dist.loc.clone().detach().cpu().numpy())
for _ in range(50):
obj.sum().backward(retain_graph=True)
for a_dim in range(agent.approx_post.dist.loc.shape[1]):
if a_dim not in action_indices:
agent.approx_post.dist.loc.grad[:, a_dim] = 0.
optimizer.step()
optimizer.zero_grad()
agent.approx_post._sample = None
iterative_locs.append(agent.approx_post.dist.loc.clone().detach().cpu().numpy())
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
iterative_objectives.append(-obj.detach())
clear_gradients(agent.generative_parameters())
else:
# amortized
for inf_it in range(50):
# reset the approx post dist
it_loc = agent.approx_post.dist.loc.clone().detach()
for a_dim in range(it_loc.shape[1]):
if a_dim not in action_indices:
it_loc[:, a_dim] = loc[:, a_dim]
it_scale = scale
dist_params = {'loc': it_loc.requires_grad_(), 'scale': it_scale.requires_grad_()}
agent.approx_post.reset(dist_params=dist_params)
iterative_locs.append(agent.approx_post.dist.loc.detach().cpu().numpy())
# estimate the objective, backprop
actions = agent.approx_post.sample(agent.n_action_samples)
obj = agent.estimate_objective(state, actions)
obj = - obj.view(agent.n_action_samples, -1, 1).mean(dim=0)
iterative_objectives.append(-obj.detach())
obj.sum().backward(retain_graph=True)
# update
params, grads = agent.approx_post.params_and_grads()
inf_input = agent.inference_optimizer.inference_model(params=params, grads=grads, state=state)
agent.approx_post.step(inf_input)
agent.approx_post.retain_grads()
# reset the approx post dist
it_loc = agent.approx_post.dist.loc.clone().detach()
for a_dim in range(it_loc.shape[1]):
if a_dim not in action_indices:
it_loc[:, a_dim] = loc[:, a_dim]
it_scale = scale
dist_params = {'loc': it_loc, 'scale': it_scale}
agent.approx_post.reset(dist_params=dist_params)
iterative_locs.append(agent.approx_post.dist.loc.detach().cpu().numpy())
total_it_locs.append(np.array(iterative_locs))
total_it_objs.append(np.array(iterative_objectives))