#!/usr/bin/env python
from __future__ import print_function
import rospy
import yaml
import numpy as np #np.dot
import os.path
from math import cos, sin
from sensor_msgs.msg import JointState
from integ_gkd_models.srv import Dynamic_inverse, Dynamic_inverseResponse
path = os.path.dirname(__file__)
with open(os.path.join(path, 'RobotParam.yml')) as f:
yaml_dict = yaml.safe_load(f)
l1 = yaml_dict.get("l1")
l2 = yaml_dict.get("l2")
m1 = yaml_dict.get("m1")
m2 = yaml_dict.get("m2")
Iz1 = yaml_dict.get("Iz1")
Iz2 = yaml_dict.get("Iz2")
g = yaml_dict.get("g")
c1 = yaml_dict.get("c1")
c2 = yaml_dict.get("c2")
def handle_Dynamic_inverse(req):
theta = req.input.position
theta_d = req.input.velocity
efforts = req.input.effort
Z1 = m1*c1**2 + m2*(l1**2+c2**2+2*l1*c2*cos(theta[1])) + Iz1 + Iz2
Z2 = m2*(c2**2+l1*c2*cos(theta[1])) + Iz2
Z3 = m2*c2**2 + Iz2
Z4 = m2*c2*g*cos(theta[0]+theta[1])+(m1*c1+m2*l1)*g*cos(theta[0])
Z5 = m2*c2*g*cos(theta[0]+theta[1])
h = -m2*l1*c2*sin(theta[1])
    D = [[Z1, Z2], [Z2, Z3]]  # inertia matrix
    C = [[h * theta_d[1], h * (theta_d[0] + theta_d[1])], [-h * theta_d[0], 0]]  # Coriolis/centrifugal matrix
    G = [Z4, Z5]  # gravity vector
output=JointState()
    Gamma = np.linalg.inv(D)
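    # The handler is truncated at this point in the source. A minimal sketch of
    # how the solve would typically finish, assuming the service returns joint
    # accelerations via forward dynamics (the response layout is an assumption):
    # theta_dd = Gamma.dot(np.array(efforts) - np.dot(C, theta_d) - np.array(G))
    # output.effort = theta_dd
    # return Dynamic_inverseResponse(output)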
import torch
import numpy as np
import math
import pylib.HumanAug
def get_preds(scores):
''' get predictions from score maps in torch Tensor
return type: torch.LongTensor
'''
assert scores.dim() == 4, 'Score maps should be 4-dim'
maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2)
maxval = maxval.view(scores.size(0), scores.size(1), 1)
idx = idx.view(scores.size(0), scores.size(1), 1) + 1
preds = idx.repeat(1, 1, 2).float()
preds[:, :, 0] = (preds[:, :, 0] - 1) % scores.size(3) + 1
preds[:, :, 1] = torch.floor((preds[:, :, 1] - 1) / scores.size(2)) + 1
pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
preds *= pred_mask
return preds
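# Example (sketch): decoding a 1x1x4x4 score map whose maximum sits at row 2,
# col 3 (0-based) yields the 1-based prediction (x=4, y=3):
# scores = torch.zeros(1, 1, 4, 4); scores[0, 0, 2, 3] = 1.0
# get_preds(scores)  # tensor([[[4., 3.]]])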
def calc_dists(preds, target, normalize, use_zero=False):
preds = preds.float()
target = target.float()
dists = torch.zeros(preds.size(1), preds.size(0))
if use_zero:
boundary = 0
else:
boundary = 1
for n in range(preds.size(0)):
for c in range(preds.size(1)):
if target[n, c, 0] > boundary and target[n, c, 1] > boundary:
dists[c, n] = torch.dist(preds[n, c, :], target[n, c, :]) / normalize[n]
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
if dists.ne(-1).sum() > 0:
# denominator = dists.ne(-1).sum()
# numerator = 0
# for i in range(0, dists.size(0)):
# if dists[i] < thr and dists[i] != -1:
# numerator += 1
return dists.le(thr).eq(dists.ne(-1)).sum() * 1.0 / dists.ne(-1).sum()
# return numerator / denominator
else:
return -1
def accuracy(output, target, idxs, thr=0.5):
''' Calculate accuracy according to PCK, but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs', followed by individual accuracies
'''
preds = get_preds(output)
gts = get_preds(target)
norm = torch.ones(preds.size(0)) * output.size(3) / 10
dists = calc_dists(preds, gts, norm)
acc = torch.zeros(len(idxs) + 1)
avg_acc = 0
cnt = 0
for i in range(len(idxs)):
acc[i + 1] = dist_acc(dists[idxs[i]])
if acc[i + 1] >= 0:
avg_acc = avg_acc + acc[i + 1]
cnt += 1
if cnt != 0:
acc[0] = avg_acc / cnt
return acc
def accuracy_origin_res(output, center, scale, res, grnd_pts, normalizers, rot):
''' Calculate accuracy according to PCK, but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs', followed by individual accuracies
'''
idxs = torch.LongTensor([0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13, 14, 15])
pred_pts = final_preds(output, center, scale, res, rot)
dists = calc_dists(pred_pts, grnd_pts, normalizers, use_zero=True)
acc = torch.zeros(len(idxs) + 1)
avg_acc = 0
cnt = 0
for i in range(len(idxs)):
acc[i + 1] = dist_acc(dists[idxs[i]])
if acc[i + 1] >= 0:
avg_acc = avg_acc + acc[i + 1]
cnt += 1
if cnt != 0:
acc[0] = avg_acc / cnt
return acc
def per_person_pckh(output, grnd_heatmap, center, scale, res,
grnd_pts, normalizers, rot, thr=0.5):
idxs = torch.LongTensor([0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13, 14, 15])
pred_pts = final_preds(output, center, scale, res, rot)
sample_num = pred_pts.size(0)
dists = calc_dists(pred_pts, grnd_pts, normalizers, use_zero=True)
grnd_pts_aug = get_preds(grnd_heatmap)
grnd_pts_indicators = torch.zeros(pred_pts.size(1), pred_pts.size(0))
for n in range(0, pred_pts.size(0)):
for c in range(0, pred_pts.size(1)):
if grnd_pts_aug[n, c, 0] > 1 and grnd_pts_aug[n, c, 1] > 1:
grnd_pts_indicators[c, n] = 1
count_vec = torch.zeros(sample_num)
accuracy_vec = torch.zeros(sample_num)
for i in range(0, sample_num):
# print dists[:, i], dists[:, i].size(), dists[:, i].index_select(0, idxs)
# exit()
per_person_dists = dists[:, i].index_select(0, idxs)
per_person_indicator = grnd_pts_indicators[:, i].index_select(0, idxs)
sum_1 = torch.ne(per_person_dists, -1).sum()
sum_2 = torch.ne(per_person_indicator, 0).sum()
if sum_1 > 0 and sum_2 > 0:
all_indicator = per_person_dists.ne(-1) & per_person_indicator.ne(0)
all_count = all_indicator.sum()
valid_indicator = per_person_dists.le(thr) & all_indicator
valid_count = valid_indicator.sum()
if valid_count > all_count:
print('valid_count is larger than all_count')
print('valid_count: ', valid_count)
print('all_count: ', all_count)
exit()
# assert per_person_indicator.ne(0).sum() <= per_person_dists.ne(-1).sum()
if per_person_dists.ne(-1).sum() != all_count:
print('some pts are transformed out of scope')
print('count before mask: ', per_person_dists.ne(-1).sum())
print('count after mask: ', all_count)
if per_person_indicator.ne(0).sum() > per_person_dists.ne(-1).sum():
print('per_person_indicator: ', per_person_indicator)
print('per_person_indicator.ne(0): ', per_person_indicator.ne(0))
print('per_person_dists.ne(-1): ', per_person_dists.ne(-1))
                # print('grnd_pts_aug_0: ', pts_aug[i])  # NOTE: `pts_aug` is undefined in this scope; commented out to avoid a NameError
print('grnd_pts_aug_1: ', grnd_pts_aug[i])
print('grnd_pts: ', grnd_pts[i])
# print 'per_person_dists: ', per_person_dists
# print 'per_person_indicator: ', per_person_indicator
# print 'per_person_dists.le(thr): ', per_person_dists.le(thr)
# print 'per_person_dists.le(thr).eq(per_person_dists.ne(-1)): ', \
# per_person_dists.le(thr).eq(per_person_dists.ne(-1))
#
# print 'per_person_dists.ne(-1): ', per_person_dists.ne(-1)
# print 'per_person_indicator.ne(0): ', per_person_indicator.ne(0)
# print 'per_person_dists.ne(-1).eq(per_person_indicator.ne(0)):', \
# per_person_dists.ne(-1).eq(per_person_indicator.ne(0))
# print torch.ne(per_person_indicator, 0)
# print 'sum_2: ', sum_2
exit()
# print(valid_count)
# print(type(valid_count))
# exit()
accuracy_vec[i] = float(valid_count) / float(all_count)
count_vec[i] = valid_count
# print(per_joint_dists.le(threshold).eq(per_joint_dists.ne(-1)).sum())
# print('joint {0} accuracy is {1}' .format(idxs[i]+1, per_joint_acc))
else:
accuracy_vec[i] = 0
count_vec[i] = 0
    # we need to compare the normalized accuracy instead of the raw count,
    # since the denominator may differ for the different transformations.
return accuracy_vec
def final_preds(output, center, scale, res, rot):
coords = get_preds(output) # float type
# pose-processing
for n in range(coords.size(0)):
for p in range(coords.size(1)):
hm = output[n][p]
px = int(math.floor(coords[n][p][0]))
py = int(math.floor(coords[n][p][1]))
if px > 1 and px < res[0] and py > 1 and py < res[1]:
diff = torch.Tensor([hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1] - hm[py - 2][px - 1]])
coords[n][p] += diff.sign() * .25
coords += 0.5
preds = coords.clone()
# Transform back
# print coords.size(), len(center), len(scale)
for i in range(coords.size(0)):
# print type(coords[i]), type(center[i]), type(scale[i])
preds[i] = transform_preds(coords[i], center[i], scale[i], res, rot[i])
    if preds.dim() < 3:
        preds = preds.view(1, *preds.size())  # unpack so view() receives ints, not a Size object
return preds
def transform_preds(coords, center, scale, res, rot):
# size = coords.size()
# coords = coords.view(-1, coords.size(-1))
# print(coords.size())
coords = coords.numpy()
# print type(coords), type(center), type(scale)
# exit()
center = center.numpy()
scale = scale.numpy()
rot = rot.numpy()
coords = TransformPts(coords, center, scale, rot, res[0], size=200, invert=1)
# exit()
coords = torch.from_numpy(coords)
# for p in range(coords.size(0)):
# # coords[p, 0:2] = torch.from_numpy(transform(coords[p, 0:2], center, scale, res, 1, 0))
return coords
def GetTransform(center, scale, rot, res, size):
# Generate transformation matrix
h = size * scale # size_src = size_dst * scale
t = np.zeros((3, 3))
# print res, float(res), type(res), float(res) / h
t[0, 0] = float(res) / h
t[1, 1] = float(res) / h
t[0, 2] = res * (-float(center[0]) / h + .5)
t[1, 2] = res * (-float(center[1]) / h + .5)
t[2, 2] = 1
if not rot == 0:
rot = -rot # To match direction of rotation from cropping
rot_mat = np.zeros((3, 3))
rot_rad = rot * np.pi / 180
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
rot_mat[0, :2] = [cs, -sn]
rot_mat[1, :2] = [sn, cs]
rot_mat[2, 2] = 1
# Need to rotate around center
t_mat = np.eye(3)
t_mat[0, 2] = -res / 2
t_mat[1, 2] = -res / 2
t_inv = t_mat.copy()
t_inv[:2, 2] *= -1
t = np.dot(t_inv, np.dot(rot_mat, np.dot(t_mat, t)))
return t
def TransformPts(pts, center, scale, rot, res, size, invert=0):
NLMK, DIM = pts.shape
t = GetTransform(center, scale, rot, res, size)
if invert:
t = | np.linalg.inv(t) | numpy.linalg.inv |
import numpy as np
def autocorr(sequence):
"""
Calculate auto-correlation for a given sequence by way of convolution (FFT). High auto-correlation after N
time shifts implies periodicity in the sequence where N is the period.
Parameters:
sequence (numpy array): the sequence to auto-correlate
returns:
r (float): a value that express the degree of auto-correlation
lag (int): the period after which the signal resembles itself the most
"""
n = sequence.size
    sequence = (sequence - np.mean(sequence))  # center the sequence (remove the mean)
result = np.correlate(sequence, sequence, mode='same')
acorr = result[n//2 + 1:] / (sequence.var() * np.arange(n-1, n//2, -1))
lag = acorr.argmax() + 1
r = acorr[lag-1]
'''
if np.abs(r) > 0.5:
print('Appears to be autocorrelated with r = {}, lag = {}'. format(r, lag))
else:
print('Appears to be not autocorrelated')
'''
return r, lag
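# Example usage (sketch): a pure sine sampled ten points per period should
# report a lag of about 10 with r close to 1.
# t = np.arange(100)
# r, lag = autocorr(np.sin(2 * np.pi * t / 10))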
def discrete_differential(sequence):
differential_sequence = np.empty(len(sequence))
for i in range(len(sequence)-1):
differential_sequence[i] = sequence[i+1] - sequence[i]
differential_sequence[-1] = 0
return differential_sequence
def find_extrema(differential_sequence):
    extrema_indices = []
    for i in range(len(differential_sequence)-1):
        if differential_sequence[i] * differential_sequence[i+1] < 0 or differential_sequence[i] == 0:
            extrema_indices.append(i)
    return extrema_indices
def is_oscillating(sequence):
    extrema = find_extrema(discrete_differential(sequence))
    corr_coefficient, period = autocorr(sequence)
    oscillator = True
    if len(extrema) <= 1:
        oscillator = False
    else:
        for i in range(len(extrema)-2):
            if extrema[i+1] == extrema[i]+1:  # there should not be extrema at neighbouring indices
                oscillator = False
                break
    if corr_coefficient < 0.5:
        periodic = False
    else:
        periodic = True
    return corr_coefficient, period, oscillator and periodic
def freq_analysis(time_series, sampling_freq):
assert isinstance(time_series, np.ndarray)
N = time_series.size
    fourier_coeffs = np.fft.fft(time_series)
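    # The source is truncated here. A typical continuation (assumption) maps
    # the positive-frequency bins to physical frequencies:
    # power = np.abs(fourier_coeffs[:N // 2]) ** 2
    # freqs = np.fft.fftfreq(N, d=1.0 / sampling_freq)[:N // 2]
    # return freqs, power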
import unittest
import numpy as np
from topology_radial_level_set import RadialLevelSetTopology
class TestRadialLevelSetTopology(unittest.TestCase):
def test_all(self):
c_param = 1e-15
rlst = RadialLevelSetTopology(2, 2, 3, 4, 5e-6, 5e-6, c_param)
self.assertTrue(rlst._a == 5)
# Test initialization of element coordinates.
xc = np.array([5, 15, 25, 5, 15, 25, 5, 15, 25, 5, 15, 25])
yc = np.array([5, 5, 5, 15, 15, 15, 25, 25, 25, 35, 35, 35])
self.assertTrue(np.all(xc == rlst._xelems))
self.assertTrue(np.all(yc == rlst._yelems))
# Test initialization of knot coordinates.
xd = np.array([10, 20, 10, 20])
yd = np.array([40/3, 40/3, 80/3, 80/3])
self.assertTrue(np.all(xd == rlst._xcoords))
self.assertTrue(np.all(yd == rlst._ycoords))
# Test initialization of hmat.
amat = np.zeros((4, 4))
for i, j in np.ndindex(4, 4):
r2 = (xd[i] - xd[j]) ** 2 + (yd[i] - yd[j]) ** 2
amat[i, j] = np.sqrt(r2 + c_param ** 2)
pmat = np.array([[1, 10, 40/3], [1, 20, 40/3],
[1, 10, 80/3], [1, 20, 80/3]])
zmat = np.zeros((3, 3))
hmat = np.vstack((np.hstack((amat, pmat)), np.hstack((pmat.T, zmat))))
self.assertTrue(np.all(hmat == rlst._hmat))
# Test initialization of gmat.
amat = np.zeros((12, 4))
pmat = np.zeros((12, 3))
for i, j in np.ndindex(12, 4):
r2 = (xc[i] - xd[j]) ** 2 + (yc[i] - yd[j]) ** 2
amat[i, j] = np.sqrt(r2 + c_param ** 2)
for i in range(12):
pmat[i, 0] = 1
pmat[i, 1] = xc[i]
pmat[i, 2] = yc[i]
gmat = np.hstack((amat, pmat))
self.assertTrue(np.all(gmat == rlst._gmat))
        # Let's test that reshape puts the coordinates of the elements back in
        # the correct positions.
xcc = np.atleast_2d(xc).T.reshape(rlst._dim_elems, order='F')
ycc = yc.reshape(rlst._dim_elems, order='F')
xccc = np.array([[5, 5, 5, 5], [15, 15, 15, 15], [25, 25, 25, 25]])
yccc = np.array([[5, 15, 25, 35], [5, 15, 25, 35], [5, 15, 25, 35]])
self.assertTrue(np.all(xcc == xccc))
self.assertTrue(np.all(ycc == yccc))
        # Let's test a topology that is biased towards x=0.
        # Remember the tip of the AFM cantilever is added after the fact.
f = np.array([0.1, 0.25, -0.2, -0.3])
t1 = np.array([[1, 1, 0, 0], [1, 1, 0, 1], [1, 1, 0, 0]])
rlst.update_topology(f)
self.assertTrue(np.all(t1 == rlst.topology))
        # Let's test a topology that is biased towards y=0.
        # Remember the tip of the AFM cantilever is added after the fact.
f = np.array([0.1, -0.25, 0.2, -0.3])
t2 = np.array([[1, 1, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]])
rlst.update_topology(f)
        self.assertTrue(np.all(t2 == rlst.topology))
import os
import astropy.constants as const
import astropy.units as u
import numpy as np
from astropy.coordinates import GCRS, ITRS, SkyOffsetFrame, SkyCoord, EarthLocation, Angle, get_sun
from astropy.time import Time
from sora.config import input_tests
__all__ = ['plot_occ_map']
def xy2latlon(x, y, loncen, latcen, time):
"""Calculates the longitude and latitude given projected positions x and y.
Parameters
----------
x : `int`, `float`
Projected position in x, in the GCRS, in meters.
y : `int`, `float`
Projected position in y, in the GCRS, in meters.
loncen : `int`, `float`
Center longitude of projection, in degrees.
latcen : `int`, `float`
Center latitude of projection, in degrees.
time : `astropy.time.Time`
Time of referred projection.
Returns
-------
lon, lat : `list`
Longitude and Latitude whose projection at loncen, lat results
in x, y. (deg).
"""
r = const.R_earth.to(u.m).value
site_cen = EarthLocation(loncen*u.deg, latcen*u.deg)
itrs_cen = site_cen.get_itrs(obstime=time)
gcrs_cen = itrs_cen.transform_to(GCRS(obstime=time))
    z = np.array(y, ndmin=1)  # projected y maps to the offset frame's z-axis
    y = np.array(x, ndmin=1)  # projected x maps to the offset frame's y-axis
x2 = r*r-y*y-z*z
a = np.where(x2 >= 0.0)
x = np.sqrt(x2[a])
y = y[a]
z = z[a]
lon = np.repeat(1e+31, len(x2))
lat = np.repeat(1e+31, len(x2))
center_frame = SkyOffsetFrame(origin=gcrs_cen)
if len(x) > 0:
n = 0
if not time.isscalar and len(time) == len(x2):
time = time[a]
while True:
n += 1
new_pos = SkyCoord(x*u.m, y*u.m, z*u.m, representation_type='cartesian', frame=center_frame[a])
n_coord = new_pos.transform_to(GCRS(obstime=time))
n_itrs = n_coord.transform_to(ITRS(obstime=time))
n_site = n_itrs.earth_location
n_site = EarthLocation(n_site.lon, n_site.lat, 0)
itrs_site = n_site.get_itrs(obstime=time)
gcrs_site = itrs_site.transform_to(GCRS(obstime=time))
target1 = gcrs_site.transform_to(center_frame[a])
if n == 4:
lon[a] = n_site.lon.deg
lat[a] = n_site.lat.deg
break
x = target1.cartesian.x.to(u.m).value
return lon, lat
def latlon2xy(lon, lat, loncen, latcen):
"""Calculates the projection of longitude and latitude in the loncen,
latcen direction.
Parameters
----------
lon : `int`, `float`
Longitude to calculate projection.
lat : `int`, `float`
Latitude to calculate projection.
loncen : `int`, `float`
Center longitude of projection, in degrees.
latcen : `int`, `float`
Center latitude of projection, in degrees.
Returns
-------
x, y : `list`
Projection of lon, lat at loncen, latcen, in the ITRS (meters).
"""
site_cen = EarthLocation(loncen*u.deg, latcen*u.deg)
itrs_cen = site_cen.get_itrs()
lon = np.array(lon, ndmin=1)
lat = np.array(lat, ndmin=1)
site = EarthLocation(lon*u.deg, lat*u.deg, height=0*u.m)
itrs_site = site.get_itrs()
target = itrs_site.transform_to(SkyOffsetFrame(origin=itrs_cen))
y = target.cartesian.y.to(u.m).value
z = target.cartesian.z.to(u.m).value
k = np.where(target.cartesian.x.to(u.m).value < 0.0)
y[k] = 1e+31
z[k] = 1e+31
return y, z
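# Example (sketch): the projection of the center point itself is (0, 0), while
# points on the far side of the Earth come back flagged as 1e+31.
# x, y = latlon2xy(0.0, 0.0, loncen=0.0, latcen=0.0)  # -> (array([0.]), array([0.]))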
def plot_occ_map(name, radius, coord, time, ca, pa, vel, dist, mag=0, longi=0, **kwargs):
"""Plots the map of the occultation.
Parameters
----------
name : `str`
Name of the object.
radius : `int`, `float`
Radius of the object, in km.
coord : `str`, `astropy.coordinates.SkyCoord`
Coordinates of the star (``"hh mm ss.sss dd mm ss.sss"`` or
``"hh.hhhhhhhh dd.dddddddd"``).
time : `str`, `astropy.time.Time`
Instant of Closest Approach (iso or isot format).
ca : `int`, `float`
Closest Approach Distance, in arcsec.
pa : `int`, `float`
Position Angle at C/A, in degrees.
vel : `int`, `float`
Velocity of the event, in km/s.
dist : `int`, `float`
Object distance at C/A, in AU.
mag : `int`, `float`, default=0
Mag* = Normalized magnitude to vel=20km/s.
longi : `int`, `float`, default=0
East longitude of sub-planet point, deg, positive towards East.
nameimg : `str`
Change the name of the imaged saved.
path : `str`
Path to a directory where to save map.
resolution : `int`, default=2
Cartopy feature resolution.\n
- ``1`` means a resolution of "10m";\n
- ``2`` a resolution of "50m";\n
- ``3`` a resolution of "100m".
states : `bool`
If True, plots the states borders of the countries. The states
of some countries will only be shown depending on the resolution.
zoom : `int`, `float`
Zooms in or out of the map.
centermap_geo : `list`, default=None
Center the map given coordinates in longitude and latitude. It must be
a list with two numbers.
centermap_delta : `list`, default=None
Displace the center of the map given displacement in X and Y, in km.
It must be a list with two numbers.
centerproj : `list`
Rotates the Earth to show occultation with the center projected at a
given longitude and latitude. It must be a list with two numbers.
labels : `bool`, default=True
Plots text above and below the map with the occultation parameters.
meridians : `int`, default=30
Plots lines representing the meridians for given interval, in degrees.
parallels : `int`, default=30
Plots lines representing the parallels for given interval, in degrees.
sites : `dict`
Plots site positions in map. It must be a python dictionary where the
key is the `name` of the site, and the value is a list with `longitude`,
`latitude`, `delta_x`, `delta_y` and `color`. `delta_x` and `delta_y`
are displacement, in km, from the point position of the site in the map
and the `name`. `color` is the color of the point.
site_name : `bool`
If True, it prints the name of the sites given, else it plots only the points.
site_box_alpha : `int`, `float`, default=0
Sets the transparency of a box surrounding each station name. 0 equals to
transparent, and 1 equals to opaque.
countries : `dict`
Plots the names of countries. It must be a python dictionary where the
key is the name of the country and the value is a list with longitude
and latitude of the lower left part of the text.
offset : `list`
Applies an offset to the ephemeris, calculating new CA and instant of
CA. It is a pair of `delta_RA*cosDEC` and `delta_DEC`.
mapstyle : `int`, default=1
Define the color style of the map. ``'1'`` is the default black
and white scale. ``'2'`` is a colored map.
error : `int`, `float`
Ephemeris error in mas. It plots a dashed line representing radius + error.
ercolor : `str`
Changes the color of the lines of the error bar.
ring : `int`, `float`
Plots a dashed line representing the location of a ring. It is given
in km, from the center.
rncolor : `str`
Changes the color of ring lines.
atm : `int`, `float`
Plots a dashed line representing the location of an atmosphere. It is
given in km, from the center.
atcolor : `str`
Changes the color of atm lines.
chord_delta : `list`
List with distances from center to plot chords.
chord_geo : `2d-list`
List with pairs of coordinates to plot chords.
chcolor : `str`, default='grey'
Color of the line of the chords.
heights : `list`
It plots a circular dashed line showing the locations where the observer
would observe the occultation at a given height above the horizons.
This must be a list.
hcolor : `str`
Changes the color of the height lines.
mapsize : `list`, default= [46.0, 38.0]
The size of figure, in cm. It must be a list with two values.
cpoints : `int`, `float`, default=60
Interval for the small points marking the center of shadow, in seconds.
ptcolor : `str`
Change the color of the center points.
alpha : `float`, default=0.2
The transparency of the night shade, where 0.0 is full transparency and
1.0 is full black.
fmt : `str`, default:'png'
The format to save the image. It is parsed directly by `matplotlib.pyplot`.
dpi : `int`, default=100
Resolution in "dots per inch". It defines the quality of the image.
lncolor : `str`
Changes the color of the line that represents the limits of the shadow
over Earth.
outcolor :`str`
Changes the color of the lines that represents the limits of the shadow
outside Earth.
scale : `int`, `float`
Arbitrary scale for the size of the name of the site.
cscale : `int`, `float`
Arbitrary scale for the name of the country.
sscale : `int`, `float`
Arbitrary scale for the size of point of the site.
pscale : `int`, `float`
Arbitrary scale for the size of the points that represent the center of
the shadow.
arrow : `bool`
If True, it plots the arrow with the occultation direction.
Important
---------
Required parameters to plot an occultation map: 'name', 'radius', 'coord',
'time', 'ca', 'pa', 'vel', and 'dist'.
Note
----
The parameters 'mag' and 'longi' are optional and only printed in label.
All other remaining parameters can be used to further customize the Map
configuration.
When producing the map, only one of 'centermap_geo' or 'centermap_delta'
options can be used at a time.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
allowed_kwargs = ['alpha', 'arrow', 'atcolor', 'atm', 'centermap_delta', 'centermap_geo', 'centerproj',
'chcolor', 'chord_delta', 'chord_geo', 'countries', 'cpoints', 'cscale', 'dpi', 'ercolor',
'error', 'fmt', 'hcolor', 'heights', 'labels', 'lncolor', 'mapsize', 'mapstyle', 'meridians',
'nameimg', 'nscale', 'offset', 'outcolor', 'parallels', 'path', 'pscale', 'ptcolor',
'resolution', 'ring', 'rncolor', 'site_name', 'sites', 'sscale', 'states', 'zoom',
'site_box_alpha']
input_tests.check_kwargs(kwargs, allowed_kwargs=allowed_kwargs)
if not type(name) == str:
raise TypeError('name keyword must be a string')
radius = radius*u.km
occs = {}
try:
occs['stars'] = SkyCoord(coord, frame='icrs', unit=(u.hourangle, u.degree))
except:
raise KeyError('"star" keyword is not in the format: "hh mm ss.sss dd mm ss.sss" or "hh.hhhhhhhh dd.dddddddd"')
try:
occs['datas'] = Time(time)
except:
raise KeyError('"time" keyword is not a iso or isot time format')
occs['ca'] = ca*u.arcsec
occs['posa'] = pa*u.deg
occs['vel'] = vel*(u.km/u.s)
occs['dist'] = dist*u.AU
occs['magG'] = mag
occs['longi'] = longi
mapstyle = kwargs.get('mapstyle', 1)
if mapstyle not in [1, 2]:
        raise ValueError('mapstyle must be 1 or 2')
resolution = kwargs.get('resolution', 2)
if resolution not in [1, 2, 3]:
raise TypeError('resolution keyword must be one of these: [1, 2, 3] where 1=10m, 2=50m and 3=100m')
res = ['10m', '50m', '110m']
resolution = res[resolution-1]
nameimg = kwargs.get('nameimg', '{}_{}'.format(name, occs['datas'].isot.replace(':', '_')))
fmt = kwargs.get('fmt', 'png')
dpi = kwargs.get('dpi', 100)
step = kwargs.get('step', 1)
mapsize = kwargs.get('mapsize', [46.0, 38.0])*u.cm
erro = kwargs.get('error', None)
ring = kwargs.get('ring', None)
atm = kwargs.get('atm', None)
cpoints = kwargs.get('cpoints', 60)
states = kwargs.get('states', True)
labels = kwargs.get('labels', True)
meridians = kwargs.get('meridians', 30)
parallels = kwargs.get('parallels', 30)
nscale = kwargs.get('nscale', 1)
cscale = kwargs.get('cscale', 1)
sscale = kwargs.get('sscale', 1)
pscale = kwargs.get('pscale', 1)
    heights = np.array(kwargs.get('heights', []), ndmin=1)  # the default belongs inside get(), not in np.array's dtype slot
alpha = kwargs.get('alpha', 0.2)
site_box_alpha = kwargs.get('site_box_alpha', 0.0)
centermap_geo = kwargs.get('centermap_geo', None)
centermap_delta = kwargs.get('centermap_delta', None)
if 'centermap_geo' in kwargs and 'centermap_delta' in kwargs:
raise ValueError('User must give "centermap_geo" OR "centermap_delta"')
zoom = kwargs.get('zoom', 1)
if zoom <= 0:
raise ValueError('zoom can not be equal or smaller than 0.')
off_ra, off_de = kwargs.get('offset', [0.0, 0.0])*u.mas
arrow = kwargs.get('arrow', True)
site_name = kwargs.get('site_name', True)
path = kwargs.get('path', '.')
if not os.path.exists(path):
        raise IOError('Path does not exist')
chord_delta = np.array(kwargs.get('chord_delta', []), ndmin=1)*u.km
chord_geo = kwargs.get('chord_geo', [])
if len(chord_geo) > 0:
try:
            b = np.array(chord_geo, ndmin=2)
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import math
from matplotlib import pyplot as plt
import json
from transformations import quaternion_multiply, quaternion_inverse, quaternion_matrix, quaternion_from_matrix, euler_from_quaternion
from scipy.interpolate import UnivariateSpline
#from anim_utils.animation_data.constants import DEFAULT_ROTATION_ORDER
DEFAULT_ROTATION_ORDER = ['Xrotation','Yrotation','Zrotation']
def normalize(v):
return v/np.linalg.norm(v)
def quaternion_from_axis_angle(axis, angle):
q = [1,0,0,0]
if np.linalg.norm(axis) > 0:
q[1] = axis[0] * math.sin(angle / 2)
q[2] = axis[1] * math.sin(angle / 2)
q[3] = axis[2] * math.sin(angle / 2)
q[0] = math.cos(angle / 2)
q = normalize(q)
return q
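# Example (sketch): a half turn about the z-axis gives q ≈ [0, 0, 0, 1] in the
# (w, x, y, z) convention used here:
# quaternion_from_axis_angle([0, 0, 1], math.pi)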
def exp_map_to_quaternion(e):
angle = np.linalg.norm(e)
if angle > 0:
axis = e / angle
q = quaternion_from_axis_angle(axis, angle)
else:
q = [1, 0, 0, 0]
return q
def convert_exp_frame_to_quat_frame(skeleton, e):
src_offset = 0
dest_offset = 0
n_joints = len(skeleton.animated_joints)
q = np.zeros(n_joints*4)
for node in skeleton.animated_joints:
e_i = e[src_offset:src_offset+3]
q[dest_offset:dest_offset+4] = exp_map_to_quaternion(e_i)
src_offset += 3
dest_offset += 4
return q
def add_quat_frames(skeleton, q_frame1, q_frame2, dest_offset=3):
src_offset = 0
new_quat_frame = np.zeros(len(q_frame1))
new_quat_frame[:3] = q_frame1[:3]
for node in skeleton.animated_joints:
new_q = quaternion_multiply(q_frame1[dest_offset:dest_offset + 4], q_frame2[src_offset:src_offset + 4])
new_quat_frame[dest_offset:dest_offset+4] = new_q
dest_offset += 4
src_offset += 4
return new_quat_frame
def get_3d_rotation_between_vectors(a, b):
v = np.cross(a, b)
s = np.linalg.norm(v)
if s ==0:
return np.eye(3)
c = np.dot(a,b)
v_x = np.array([[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]])
v_x_2 = np.dot(v_x,v_x)
    r = np.eye(3) + v_x + v_x_2 * ((1 - c) / s**2)  # Rodrigues: R = I + [v]x + [v]x^2 * (1-c)/s^2
return r
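# Quick check (sketch): for unit vectors a and b the result should rotate a
# onto b, e.g.
# a = np.array([1.0, 0.0, 0.0]); b = np.array([0.0, 1.0, 0.0])
# np.allclose(np.dot(get_3d_rotation_between_vectors(a, b), a), b)  # True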
def normalize_quaternion(q):
return quaternion_inverse(q) / np.dot(q, q)
def get_average_joint_position(skeleton, frames, joint_name, start_frame, end_frame):
end_frame = min(end_frame, frames.shape[0])
temp_positions = []
for idx in range(start_frame, end_frame):
frame = frames[idx]
pos = skeleton.nodes[joint_name].get_global_position(frame)
temp_positions.append(pos)
return np.mean(temp_positions, axis=0)
def get_average_joint_direction(skeleton, frames, joint_name, child_joint_name, start_frame, end_frame,ground_height=0):
temp_dirs = []
for idx in range(start_frame, end_frame):
frame = frames[idx]
pos1 = skeleton.nodes[joint_name].get_global_position(frame)
pos2 = skeleton.nodes[child_joint_name].get_global_position(frame)
#pos2[1] = ground_height
joint_dir = pos2 - pos1
joint_dir /= np.linalg.norm(joint_dir)
temp_dirs.append(joint_dir)
return np.mean(temp_dirs, axis=0)
def get_average_direction_from_target(skeleton, frames, target_pos, child_joint_name, start_frame, end_frame,ground_height=0):
temp_dirs = []
for idx in range(start_frame, end_frame):
frame = frames[idx]
pos2 = skeleton.nodes[child_joint_name].get_global_position(frame)
pos2[1] = ground_height
joint_dir = pos2 - target_pos
joint_dir /= np.linalg.norm(joint_dir)
temp_dirs.append(joint_dir)
return np.mean(temp_dirs, axis=0)
def to_local_cos(skeleton, node_name, frame, q):
# bring into parent coordinate system
pm = skeleton.nodes[node_name].get_global_matrix(frame)[:3,:3]
#pm[:3, 3] = [0, 0, 0]
inv_pm = np.linalg.inv(pm)
r = quaternion_matrix(q)[:3,:3]
lr = np.dot(inv_pm, r)[:3,:3]
q = quaternion_from_matrix(lr)
return q
def get_dir_on_plane(x, n):
axb = np.cross(x,n)
d = np.cross(n, normalize(axb))
d = normalize(d)
return d
def project2(x,n):
""" get direction on plane based on cross product and then project onto the direction """
d = get_dir_on_plane(x, n)
return project_on_line(x, d)
def project_vec3(x, n):
"""" project vector on normal of plane and then substract from vector to get projection on plane """
w = project_on_line(x, n)
v = x-w
return v
def project(x, n):
""" http://www.euclideanspace.com/maths/geometry/elements/plane/lineOnPlane/"""
l = np.linalg.norm(x)
a = normalize(x)
b = normalize(n)
axb = np.cross(a,b)
bxaxb = np.cross(b, axb)
return l * bxaxb
def project_on_line(x, v):
"""https://en.wikipedia.org/wiki/Scalar_projection"""
s = np.dot(x, v) / np.dot(v, v)
return s * v
def project_onto_plane(x, n):
"""https://stackoverflow.com/questions/17915475/how-may-i-project-vectors-onto-a-plane-defined-by-its-orthogonal-vector-in-pytho"""
nl = np.linalg.norm(n)
d = np.dot(x, n) / nl
p = [d * normalize(n)[i] for i in range(len(n))]
return [x[i] - p[i] for i in range(len(x))]
def project_vec_on_plane(vec, n):
"""https://math.stackexchange.com/questions/633181/formula-to-project-a-vector-onto-a-plane"""
n = normalize(n)
d = np.dot(vec, n)
return vec - np.dot(d, n)
def distance_from_point_to_line(p1, p2, vec):
proj = p2+project_on_line(p1, vec)
return np.linalg.norm(proj - p1)
def limb_projection(p1, center, n):
#s1 = np.dot(p1, n) / np.dot(p1, p1)
#proj_p1 = p1 - s1*n
#s2 = np.dot(p2, n) / np.dot(p2, p2)
#proj_p2 = p2 - s2 * n
proj_p1 = project_vec3(p1, n)
proj_center = project_vec3(center, n)
d = np.linalg.norm(proj_p1-proj_center)
return d
def plot_line(ax, start, end,label=None, color=None):
x = start[0], end[0]
y = start[1], end[1]
ax.plot(x, y, label=label, color=color)
def convert_to_foot_positions(joint_heights):
n_frames = len(list(joint_heights.items())[0][1][0])
print(n_frames)
foot_positions = []
for f in range(n_frames):
foot_positions.append(dict())
for joint, data in list(joint_heights.items()):
ps, yv, ya = data
for frame_idx, p in enumerate(ps):
foot_positions[frame_idx].update({joint: p})
return foot_positions
def plot_foot_positions(ax, foot_positions, bodies,step_size=5):
for f, data in enumerate(foot_positions):
if f%step_size != 0:
continue
for body in [list(bodies.values())[0]]:
start_j = body["start"]
end_j = body["end"]
start = f, data[start_j][1]
end = f+5, data[end_j][1]
plot_line(ax, start, end, color="k")
def get_vertical_acceleration(skeleton, frames, joint_name):
""" https://stackoverflow.com/questions/40226357/second-derivative-in-python-scipy-numpy-pandas
"""
ps = []
for frame in frames:
p = skeleton.nodes[joint_name].get_global_position(frame)
ps.append(p)
ps = np.array(ps)
x = np.linspace(0, len(frames), len(frames))
ys = np.array(ps[:, 1])
y_spl = UnivariateSpline(x, ys, s=0, k=4)
velocity = y_spl.derivative(n=1)
acceleration = y_spl.derivative(n=2)
return ps, velocity(x), acceleration(x)
def quaternion_to_axis_angle(q):
"""http://www.euclideanspace.com/maths/geometry/rotations/conversions/quaternionToAngle/
"""
a = 2* math.acos(q[0])
x = q[1] / math.sqrt(1-q[0]*q[0])
y = q[2] / math.sqrt(1-q[0]*q[0])
z = q[3] / math.sqrt(1-q[0]*q[0])
return normalize([x,y,z]),a
def get_delta_quaternion(q1,q2):
return quaternion_multiply(quaternion_inverse(q1), q2)
def get_angular_velocity(skeleton, frames, joint):
""" http://answers.unity3d.com/questions/49082/rotation-quaternion-to-angular-velocity.html
"""
idx = skeleton.animated_joints.index(joint) * 4 + 3
angular_velocity = [[0,0,0]]
prev_q = frames[0, idx:idx + 4]
for frame_idx, frame in enumerate(frames[1:]):
q = frames[frame_idx, idx:idx+4]
q_delta = get_delta_quaternion(prev_q, q)
axis, angle = quaternion_to_axis_angle(q_delta)
a = axis * angle
angular_velocity.append(a)
prev_q = q
return np.array(angular_velocity)
def get_angular_velocities(skeleton, frames, joints):
    angular_velocity = dict()
    for joint in joints:
        angular_velocity[joint] = get_angular_velocity(skeleton, frames, joint)
    return angular_velocity
def plot_joint_heights(joint_heights, ground_height=0, frame_range=(None,None)):
plt.figure(1)
ax = plt.subplot(111)
n_frames = 0
for joint, data in list(joint_heights.items()):
ps, yv, ya = data
if frame_range == (None, None):
start, end = 0, len(ps)
else:
start, end = frame_range
n_frames = end- start
x = np.linspace(start,end, n_frames)
plt.plot(x, ps[start:end,1], label=joint)
plot_line(ax, (start, ground_height),(end, ground_height), "ground")
foot_positions = convert_to_foot_positions(joint_heights)
bodies = {"left":{"start":"LeftHeel", "end": "LeftToeBase"}, "right":{"start":"RightHeel", "end": "RightToeBase"}}
#plot_foot_positions(ax, foot_positions, bodies)
plt.legend()
    plt.show(block=True)
def plot_angular_velocities(angular_velocities, frame_range=(None,None)):
plt.figure(1)
ax = plt.subplot(111)
n_frames = 0
for joint, data in list(angular_velocities.items()):
if frame_range == (None, None):
start, end = 0, len(data)
else:
start, end = frame_range
n_frames = end- start
x = np.linspace(start,end, n_frames)
v = list(map(np.linalg.norm, data[start:end]))
plt.plot(x, np.rad2deg(v), label=joint)
plt.legend()
    plt.show(block=True)
def export_constraints(constraints, file_path):
unique_dict = dict()
for frame_idx in constraints:
for c in constraints[frame_idx]:
key = tuple(c.position)
unique_dict[key] = None
points = []
for p in list(unique_dict.keys()):
points.append(p)
data = dict()
data["points"] = points
with open(file_path, "w") as out:
json.dump(data, out)
def plot_constraints(constraints, ground_height=0):
colors ={"RightFoot":"r", "LeftFoot":"g"}
plt.figure(1)
joint_constraints = dict()
ax = plt.subplot(111)
for frame_idx in constraints:
for c in constraints[frame_idx]:
if c.joint_name not in list(joint_constraints.keys()):
joint_constraints[c.joint_name] = []
joint_constraints[c.joint_name].append(c.position)
for joint_name in list(joint_constraints.keys()):
temp = np.array(joint_constraints[joint_name])
y = temp[:, 1]
n_frames = len(y)
x = | np.linspace(0, n_frames, n_frames) | numpy.linspace |
# -*- coding: utf-8 -*-
"""doMusicAndSpeechDetection.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/github/satvik-venkatesh/audio-seg-data-synth/blob/main/models/doMusicAndSpeechDetection.ipynb
"""
import soundfile as sf
import argparse
import numpy as np
import librosa
import tensorflow as tf
import math
from tensorflow import keras
from tensorflow.keras import layers
"""
This function converts the predictions made by the neural network into a readable format.
"""
def preds_to_se(p, audio_clip_length = 8.0):
start_speech = -100
start_music = -100
stop_speech = -100
stop_music = -100
audio_events = []
n_frames = p.shape[0]
if p[0, 0] == 1:
start_speech = 0
if p[0, 1] == 1:
start_music = 0
for i in range(n_frames - 1):
if p[i, 0] == 0 and p[i + 1, 0] == 1:
start_speech = i + 1
elif p[i, 0] == 1 and p[i + 1, 0] == 0:
stop_speech = i
start_time = frames_to_time(start_speech)
stop_time = frames_to_time(stop_speech)
audio_events.append((start_time, stop_time, "speech"))
start_speech = -100
stop_speech = -100
if p[i, 1] == 0 and p[i + 1, 1] == 1:
start_music = i + 1
elif p[i, 1] == 1 and p[i + 1, 1] == 0:
stop_music = i
start_time = frames_to_time(start_music)
stop_time = frames_to_time(stop_music)
audio_events.append((start_time, stop_time, "music"))
start_music = -100
stop_music = -100
if start_speech != -100:
start_time = frames_to_time(start_speech)
stop_time = audio_clip_length
audio_events.append((start_time, stop_time, "speech"))
start_speech = -100
stop_speech = -100
if start_music != -100:
start_time = frames_to_time(start_music)
stop_time = audio_clip_length
audio_events.append((start_time, stop_time, "music"))
start_music = -100
stop_music = -100
audio_events.sort(key = lambda x: x[0])
return audio_events
""" This function was adapted from https://github.com/qlemaire22/speech-music-detection """
def smooth_output(output, min_speech=1.3, min_music=3.4, max_silence_speech=0.4, max_silence_music=0.6):
duration_frame = 220 / 22050
n_frame = output.shape[1]
start_music = -1000
start_speech = -1000
for i in range(n_frame):
if output[0, i] == 1:
if i - start_speech > 1:
if (i - start_speech) * duration_frame <= max_silence_speech:
output[0, start_speech:i] = 1
start_speech = i
if output[1, i] == 1:
if i - start_music > 1:
if (i - start_music) * duration_frame <= max_silence_music:
output[1, start_music:i] = 1
start_music = i
start_music = -1000
start_speech = -1000
for i in range(n_frame):
if i != n_frame - 1:
if output[0, i] == 0:
if i - start_speech > 1:
if (i - start_speech) * duration_frame <= min_speech:
output[0, start_speech:i] = 0
start_speech = i
if output[1, i] == 0:
if i - start_music > 1:
if (i - start_music) * duration_frame <= min_music:
output[1, start_music:i] = 0
start_music = i
else:
if i - start_speech > 1:
if (i - start_speech) * duration_frame <= min_speech:
output[0, start_speech:i + 1] = 0
if i - start_music > 1:
if (i - start_music) * duration_frame <= min_music:
output[1, start_music:i + 1] = 0
return output
def frames_to_time(f, sr = 22050.0, hop_size = 220):
return f * hop_size / sr
def get_log_melspectrogram(audio, sr = 22050, hop_length = 220, n_fft = 1024, n_mels = 80, fmin = 64, fmax = 8000):
"""Return the log-scaled Mel bands of an audio signal."""
bands = librosa.feature.melspectrogram(
y=audio, sr=sr, hop_length=hop_length, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax, dtype=np.float32)
return librosa.core.power_to_db(bands, amin=1e-7)
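# Example (sketch): one second of audio at 22.05 kHz should yield an (80, 101)
# log-mel matrix with the default hop_length=220 (center-padded frames).
# m = get_log_melspectrogram(np.zeros(22050, dtype=np.float32))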
"""
Make predictions for full audio.
"""
def mk_preds_fa(audio_path, hop_size = 6.0, discard = 1.0, win_length = 8.0, sampling_rate = 22050):
in_signal, in_sr = sf.read(audio_path)
# Convert to mono if needed.
if (in_signal.ndim > 1):
in_signal_mono = librosa.to_mono(in_signal.T)
in_signal = np.copy(in_signal_mono)
# Resample the audio file.
in_signal_22k = librosa.resample(in_signal, orig_sr=in_sr, target_sr=sampling_rate)
in_signal = np.copy(in_signal_22k)
# Pad the input signal if it is shorter than 8 s.
if in_signal.shape[0] < int(8.0 * sampling_rate):
pad_signal = np.zeros((int(8.0 * sampling_rate)))
pad_signal[:in_signal.shape[0]] = in_signal
in_signal = np.copy(pad_signal)
audio_clip_length_samples = in_signal.shape[0]
print('audio_clip_length_samples is {}'.format(audio_clip_length_samples))
hop_size_samples = 220 * 602 - 1
win_length_samples = 220 * 802 - 1
n_preds = int(math.ceil((audio_clip_length_samples - win_length_samples) / hop_size_samples)) + 1
in_signal_pad = np.zeros((n_preds * hop_size_samples + 200 * 220))
in_signal_pad[0:audio_clip_length_samples] = in_signal
    preds = np.zeros((n_preds, 802, 2))
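    # Truncated in the source. The continuation would typically slide the 8 s
    # window over in_signal_pad, compute log-mel features per window, and fill
    # preds with the network's frame-wise speech/music activations (the model
    # call and overlap bookkeeping are assumptions).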
import unittest
import numpy
import test_utils
class TestBasicAddition(unittest.TestCase):
# Test basic addition of all combinations of all types, not checking for any edge cases specifically.
ZERO = numpy.float32(0)
ONE = numpy.float32(1)
MIN_SUBNORM = numpy.float32(1e-45)
MAX_SUBNORM = numpy.float32(1.1754942e-38)
MIN_NORM = numpy.float32(1.1754944e-38)
MAX_NORM = numpy.float32(3.4028235e38)
INF = numpy.float32(numpy.inf)
NAN = numpy.float32(numpy.nan)
# Initialise the tester object used to run the assembled code.
@classmethod
def setUpClass(cls):
cls.tester = test_utils.SubroutineTester("test_addition.s")
cls.tester.initialise()
# Run a test to compare the expected sum of two floats to the actual sum.
def run_test(self, float1: numpy.float32, float2: numpy.float32):
        expected = float1 + float2
        if numpy.isnan(expected):
            self.assertTrue(numpy.isnan(TestBasicAddition.tester.run_test(float1, float2)))
        else:
            self.assertEqual(expected,
                             TestBasicAddition.tester.run_test(float1, float2))
def test_zero(self):
# Test that ±0 + x = x for all types of x.
self.run_test(self.ZERO, self.ZERO)
self.run_test(self.ZERO, -self.ZERO)
self.run_test(-self.ZERO, self.ZERO)
self.run_test(-self.ZERO, -self.ZERO)
self.run_test(self.ZERO, self.ONE)
self.run_test(self.ZERO, -self.ONE)
self.run_test(-self.ZERO, self.ONE)
self.run_test(-self.ZERO, -self.ONE)
self.run_test(self.ZERO, self.MIN_SUBNORM)
self.run_test(self.ZERO, -self.MIN_SUBNORM)
self.run_test(-self.ZERO, self.MIN_SUBNORM)
self.run_test(-self.ZERO, -self.MIN_SUBNORM)
self.run_test(self.ZERO, numpy.float32(9.060464e-39))
self.run_test(self.ZERO, -numpy.float32(9.060464e-39))
self.run_test(-self.ZERO, numpy.float32(9.060464e-39))
self.run_test(-self.ZERO, -numpy.float32(9.060464e-39))
self.run_test(self.ZERO, self.MAX_SUBNORM)
self.run_test(self.ZERO, -self.MAX_SUBNORM)
self.run_test(-self.ZERO, self.MAX_SUBNORM)
self.run_test(-self.ZERO, -self.MAX_SUBNORM)
self.run_test(self.ZERO, self.MIN_NORM)
self.run_test(self.ZERO, -self.MIN_NORM)
self.run_test(-self.ZERO, self.MIN_NORM)
self.run_test(-self.ZERO, -self.MIN_NORM)
self.run_test(self.ZERO, numpy.float32(395.6166))
self.run_test(self.ZERO, -numpy.float32(395.6166))
self.run_test(-self.ZERO, numpy.float32(395.6166))
self.run_test(-self.ZERO, -numpy.float32(395.6166))
self.run_test(self.ZERO, self.MAX_NORM)
self.run_test(self.ZERO, -self.MAX_NORM)
self.run_test(-self.ZERO, self.MAX_NORM)
self.run_test(-self.ZERO, -self.MAX_NORM)
self.run_test(self.ZERO, self.INF)
self.run_test(self.ZERO, -self.INF)
self.run_test(-self.ZERO, self.INF)
self.run_test(-self.ZERO, -self.INF)
self.run_test(self.ZERO, self.NAN)
self.run_test(-self.ZERO, self.NAN)
def test_one(self):
# Test ±1 + x for all types of x.
self.run_test(self.ONE, self.ZERO)
self.run_test(self.ONE, -self.ZERO)
self.run_test(-self.ONE, self.ZERO)
self.run_test(-self.ONE, -self.ZERO)
self.run_test(self.ONE, self.ONE)
self.run_test(self.ONE, -self.ONE)
self.run_test(-self.ONE, self.ONE)
self.run_test(-self.ONE, -self.ONE)
self.run_test(self.ONE, self.MIN_SUBNORM)
self.run_test(self.ONE, -self.MIN_SUBNORM)
self.run_test(-self.ONE, self.MIN_SUBNORM)
self.run_test(-self.ONE, -self.MIN_SUBNORM)
self.run_test(self.ONE, numpy.float32(1.902965e-39))
self.run_test(self.ONE, -numpy.float32(1.902965e-39))
self.run_test(-self.ONE, numpy.float32(1.902965e-39))
self.run_test(-self.ONE, -numpy.float32(1.902965e-39))
self.run_test(self.ONE, self.MAX_SUBNORM)
self.run_test(self.ONE, -self.MAX_SUBNORM)
self.run_test(-self.ONE, self.MAX_SUBNORM)
self.run_test(-self.ONE, -self.MAX_SUBNORM)
self.run_test(self.ONE, self.MIN_NORM)
self.run_test(self.ONE, -self.MIN_NORM)
self.run_test(-self.ONE, self.MIN_NORM)
self.run_test(-self.ONE, -self.MIN_NORM)
self.run_test(self.ONE, numpy.float32(7918.158))
self.run_test(self.ONE, -numpy.float32(7918.158))
self.run_test(-self.ONE, numpy.float32(7918.158))
self.run_test(-self.ONE, -numpy.float32(7918.158))
self.run_test(self.ONE, self.MAX_NORM)
self.run_test(self.ONE, -self.MAX_NORM)
self.run_test(-self.ONE, self.MAX_NORM)
self.run_test(-self.ONE, -self.MAX_NORM)
self.run_test(self.ONE, self.INF)
self.run_test(self.ONE, -self.INF)
self.run_test(-self.ONE, self.INF)
self.run_test(-self.ONE, -self.INF)
self.run_test(self.ONE, self.NAN)
self.run_test(-self.ONE, self.NAN)
def test_min_subnorm(self):
# Test ±MIN_SUBNORM + x for all types of x.
self.run_test(self.MIN_SUBNORM, self.ZERO)
self.run_test(self.MIN_SUBNORM, -self.ZERO)
self.run_test(-self.MIN_SUBNORM, self.ZERO)
self.run_test(-self.MIN_SUBNORM, -self.ZERO)
self.run_test(self.MIN_SUBNORM, self.ONE)
self.run_test(self.MIN_SUBNORM, -self.ONE)
self.run_test(-self.MIN_SUBNORM, self.ONE)
self.run_test(-self.MIN_SUBNORM, -self.ONE)
self.run_test(self.MIN_SUBNORM, self.MIN_SUBNORM)
self.run_test(self.MIN_SUBNORM, -self.MIN_SUBNORM)
self.run_test(-self.MIN_SUBNORM, self.MIN_SUBNORM)
self.run_test(-self.MIN_SUBNORM, -self.MIN_SUBNORM)
self.run_test(self.MIN_SUBNORM, numpy.float32(6.927885e-39))
self.run_test(self.MIN_SUBNORM, -numpy.float32(6.927885e-39))
self.run_test(-self.MIN_SUBNORM, numpy.float32(6.927885e-39))
self.run_test(-self.MIN_SUBNORM, -numpy.float32(6.927885e-39))
self.run_test(self.MIN_SUBNORM, self.MAX_SUBNORM)
self.run_test(self.MIN_SUBNORM, -self.MAX_SUBNORM)
self.run_test(-self.MIN_SUBNORM, self.MAX_SUBNORM)
self.run_test(-self.MIN_SUBNORM, -self.MAX_SUBNORM)
self.run_test(self.MIN_SUBNORM, self.MIN_NORM)
self.run_test(self.MIN_SUBNORM, -self.MIN_NORM)
self.run_test(-self.MIN_SUBNORM, self.MIN_NORM)
self.run_test(-self.MIN_SUBNORM, -self.MIN_NORM)
self.run_test(self.MIN_SUBNORM, numpy.float32(466603.3))
self.run_test(self.MIN_SUBNORM, -numpy.float32(466603.3))
self.run_test(-self.MIN_SUBNORM, numpy.float32(466603.3))
self.run_test(-self.MIN_SUBNORM, -numpy.float32(466603.3))
self.run_test(self.MIN_SUBNORM, self.MAX_NORM)
self.run_test(self.MIN_SUBNORM, -self.MAX_NORM)
self.run_test(-self.MIN_SUBNORM, self.MAX_NORM)
self.run_test(-self.MIN_SUBNORM, -self.MAX_NORM)
self.run_test(self.MIN_SUBNORM, self.INF)
self.run_test(self.MIN_SUBNORM, -self.INF)
self.run_test(-self.MIN_SUBNORM, self.INF)
self.run_test(-self.MIN_SUBNORM, -self.INF)
self.run_test(self.MIN_SUBNORM, self.NAN)
self.run_test(-self.MIN_SUBNORM, self.NAN)
def test_subnorm(self):
# Test ±x + y for subnormal x and all types of y.
self.run_test(numpy.float32(7.518523e-39), self.ZERO)
self.run_test(numpy.float32(7.518523e-39), -self.ZERO)
self.run_test(-numpy.float32(7.518523e-39), self.ZERO)
self.run_test(-numpy.float32(7.518523e-39), -self.ZERO)
self.run_test(numpy.float32(2.028916e-39), self.ONE)
self.run_test(numpy.float32(2.028916e-39), -self.ONE)
self.run_test(-numpy.float32(2.028916e-39), self.ONE)
self.run_test(-numpy.float32(2.028916e-39), -self.ONE)
self.run_test(numpy.float32(4.042427e-39), self.MIN_SUBNORM)
self.run_test(numpy.float32(4.042427e-39), -self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.042427e-39), self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.042427e-39), -self.MIN_SUBNORM)
self.run_test(numpy.float32(9.636327e-39), numpy.float32(1.0185049e-38))
self.run_test(numpy.float32(9.636327e-39), -numpy.float32(1.0185049e-38))
self.run_test(-numpy.float32(9.636327e-39), numpy.float32(1.0185049e-38))
self.run_test(-numpy.float32(9.636327e-39), -numpy.float32(1.0185049e-38))
self.run_test(numpy.float32(1.989006e-39), self.MAX_SUBNORM)
self.run_test(numpy.float32(1.989006e-39), -self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.989006e-39), self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.989006e-39), -self.MAX_SUBNORM)
self.run_test(numpy.float32(2.952435e-39), self.MIN_NORM)
self.run_test(numpy.float32(2.952435e-39), -self.MIN_NORM)
self.run_test(-numpy.float32(2.952435e-39), self.MIN_NORM)
self.run_test(-numpy.float32(2.952435e-39), -self.MIN_NORM)
self.run_test(numpy.float32(1.154907e-38), numpy.float32(4.0687437e-36))
self.run_test(numpy.float32(1.154907e-38), -numpy.float32(4.0687437e-36))
self.run_test(-numpy.float32(1.154907e-38), numpy.float32(4.0687437e-36))
self.run_test(-numpy.float32(1.154907e-38), -numpy.float32(4.0687437e-36))
self.run_test(numpy.float32(9.79494e-39), self.MAX_NORM)
self.run_test(numpy.float32(9.79494e-39), -self.MAX_NORM)
self.run_test(-numpy.float32(9.79494e-39), self.MAX_NORM)
self.run_test(-numpy.float32(9.79494e-39), -self.MAX_NORM)
self.run_test(numpy.float32(1.54569e-39), self.INF)
self.run_test(numpy.float32(1.54569e-39), -self.INF)
self.run_test(-numpy.float32(1.54569e-39), self.INF)
self.run_test(-numpy.float32(1.54569e-39), -self.INF)
self.run_test(numpy.float32(3.974073e-39), self.NAN)
self.run_test(-numpy.float32(3.974073e-39), self.NAN)
def test_max_subnorm(self):
# Test ±MAX_SUBNORM + x for all types of x.
self.run_test(self.MAX_SUBNORM, self.ZERO)
self.run_test(self.MAX_SUBNORM, -self.ZERO)
self.run_test(-self.MAX_SUBNORM, self.ZERO)
self.run_test(-self.MAX_SUBNORM, -self.ZERO)
self.run_test(self.MAX_SUBNORM, self.ONE)
self.run_test(self.MAX_SUBNORM, -self.ONE)
self.run_test(-self.MAX_SUBNORM, self.ONE)
self.run_test(-self.MAX_SUBNORM, -self.ONE)
self.run_test(self.MAX_SUBNORM, self.MIN_SUBNORM)
self.run_test(self.MAX_SUBNORM, -self.MIN_SUBNORM)
self.run_test(-self.MAX_SUBNORM, self.MIN_SUBNORM)
self.run_test(-self.MAX_SUBNORM, -self.MIN_SUBNORM)
self.run_test(self.MAX_SUBNORM, numpy.float32(2.736488e-39))
self.run_test(self.MAX_SUBNORM, -numpy.float32(2.736488e-39))
self.run_test(-self.MAX_SUBNORM, numpy.float32(2.736488e-39))
self.run_test(-self.MAX_SUBNORM, -numpy.float32(2.736488e-39))
self.run_test(self.MAX_SUBNORM, self.MAX_SUBNORM)
self.run_test(self.MAX_SUBNORM, -self.MAX_SUBNORM)
self.run_test(-self.MAX_SUBNORM, self.MAX_SUBNORM)
self.run_test(-self.MAX_SUBNORM, -self.MAX_SUBNORM)
self.run_test(self.MAX_SUBNORM, self.MIN_NORM)
self.run_test(self.MAX_SUBNORM, -self.MIN_NORM)
self.run_test(-self.MAX_SUBNORM, self.MIN_NORM)
self.run_test(-self.MAX_SUBNORM, -self.MIN_NORM)
self.run_test(self.MAX_SUBNORM, numpy.float32(8.027242e-35))
self.run_test(self.MAX_SUBNORM, -numpy.float32(8.027242e-35))
self.run_test(-self.MAX_SUBNORM, numpy.float32(8.027242e-35))
self.run_test(-self.MAX_SUBNORM, -numpy.float32(8.027242e-35))
self.run_test(self.MAX_SUBNORM, self.MAX_NORM)
self.run_test(self.MAX_SUBNORM, -self.MAX_NORM)
self.run_test(-self.MAX_SUBNORM, self.MAX_NORM)
self.run_test(-self.MAX_SUBNORM, -self.MAX_NORM)
self.run_test(self.MAX_SUBNORM, self.INF)
self.run_test(self.MAX_SUBNORM, -self.INF)
self.run_test(-self.MAX_SUBNORM, self.INF)
self.run_test(-self.MAX_SUBNORM, -self.INF)
self.run_test(self.MAX_SUBNORM, self.NAN)
self.run_test(-self.MAX_SUBNORM, self.NAN)
def test_min_norm(self):
# Test ±MIN_NORM + x for all types of x.
self.run_test(self.MIN_NORM, self.ZERO)
self.run_test(self.MIN_NORM, -self.ZERO)
self.run_test(-self.MIN_NORM, self.ZERO)
self.run_test(-self.MIN_NORM, -self.ZERO)
self.run_test(self.MIN_NORM, self.ONE)
self.run_test(self.MIN_NORM, -self.ONE)
self.run_test(-self.MIN_NORM, self.ONE)
self.run_test(-self.MIN_NORM, -self.ONE)
self.run_test(self.MIN_NORM, self.MIN_SUBNORM)
self.run_test(self.MIN_NORM, -self.MIN_SUBNORM)
self.run_test(-self.MIN_NORM, self.MIN_SUBNORM)
self.run_test(-self.MIN_NORM, -self.MIN_SUBNORM)
self.run_test(self.MIN_NORM, numpy.float32(7.235862e-39))
self.run_test(self.MIN_NORM, -numpy.float32(7.235862e-39))
self.run_test(-self.MIN_NORM, numpy.float32(7.235862e-39))
self.run_test(-self.MIN_NORM, -numpy.float32(7.235862e-39))
self.run_test(self.MIN_NORM, self.MAX_SUBNORM)
self.run_test(self.MIN_NORM, -self.MAX_SUBNORM)
self.run_test(-self.MIN_NORM, self.MAX_SUBNORM)
self.run_test(-self.MIN_NORM, -self.MAX_SUBNORM)
self.run_test(self.MIN_NORM, self.MIN_NORM)
self.run_test(self.MIN_NORM, -self.MIN_NORM)
self.run_test(-self.MIN_NORM, self.MIN_NORM)
self.run_test(-self.MIN_NORM, -self.MIN_NORM)
self.run_test(self.MIN_NORM, numpy.float32(3.0655702e-37))
self.run_test(self.MIN_NORM, -numpy.float32(3.0655702e-37))
self.run_test(-self.MIN_NORM, numpy.float32(3.0655702e-37))
self.run_test(-self.MIN_NORM, -numpy.float32(3.0655702e-37))
self.run_test(self.MIN_NORM, self.MAX_NORM)
self.run_test(self.MIN_NORM, -self.MAX_NORM)
self.run_test(-self.MIN_NORM, self.MAX_NORM)
self.run_test(-self.MIN_NORM, -self.MAX_NORM)
self.run_test(self.MIN_NORM, self.INF)
self.run_test(self.MIN_NORM, -self.INF)
self.run_test(-self.MIN_NORM, self.INF)
self.run_test(-self.MIN_NORM, -self.INF)
self.run_test(self.MIN_NORM, self.NAN)
self.run_test(-self.MIN_NORM, self.NAN)
def test_norm(self):
# Test ±x + y for normal x and all types of y.
self.run_test(numpy.float32(3.2528998e8), self.ZERO)
self.run_test(numpy.float32(3.2528998e8), -self.ZERO)
self.run_test(-numpy.float32(3.2528998e8), self.ZERO)
self.run_test(-numpy.float32(3.2528998e8), -self.ZERO)
self.run_test(numpy.float32(5781.5137), self.ONE)
self.run_test(numpy.float32(5781.5137), -self.ONE)
self.run_test(-numpy.float32(5781.5137), self.ONE)
self.run_test(-numpy.float32(5781.5137), -self.ONE)
self.run_test(numpy.float32(4.0233208e-35), self.MIN_SUBNORM)
self.run_test(numpy.float32(4.0233208e-35), -self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.0233208e-35), self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.0233208e-35), -self.MIN_SUBNORM)
self.run_test(numpy.float32(3.4244755e-37), numpy.float32(7.951416e-39))
self.run_test(numpy.float32(3.4244755e-37), -numpy.float32(7.951416e-39))
self.run_test(-numpy.float32(3.4244755e-37), numpy.float32(7.951416e-39))
self.run_test(-numpy.float32(3.4244755e-37), -numpy.float32(7.951416e-39))
self.run_test(numpy.float32(1.772688e-35), self.MAX_SUBNORM)
self.run_test(numpy.float32(1.772688e-35), -self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.772688e-35), self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.772688e-35), -self.MAX_SUBNORM)
self.run_test(numpy.float32(9.7266296e-36), self.MIN_NORM)
self.run_test(numpy.float32(9.7266296e-36), -self.MIN_NORM)
self.run_test(-numpy.float32(9.7266296e-36), self.MIN_NORM)
self.run_test(-numpy.float32(9.7266296e-36), -self.MIN_NORM)
self.run_test(numpy.float32(9.964942e17), numpy.float32(3.0321312e16))
self.run_test(numpy.float32(9.964942e17), -numpy.float32(3.0321312e16))
self.run_test(-numpy.float32(9.964942e17), numpy.float32(3.0321312e16))
self.run_test(-numpy.float32(9.964942e17), -numpy.float32(3.0321312e16))
self.run_test(numpy.float32(3.3541464e35), self.MAX_NORM)
self.run_test(numpy.float32(3.3541464e35), -self.MAX_NORM)
self.run_test(-numpy.float32(3.3541464e35), self.MAX_NORM)
self.run_test(-numpy.float32(3.3541464e35), -self.MAX_NORM)
        self.run_test(numpy.float32(1.8177568e25), self.INF)
from __future__ import absolute_import
from builtins import range
from . import datalayer
import numpy as np
# from numpy.polynomial.polynomial import polyval
## TODO: correctly handle large gaps (wait what?)
## TODO: correctly handle multiple vertical values
# Function consisting of a single Bezier curve
class CurveFunction(datalayer.Function):
# the global variables:
# self.pixels [(point0), (point1), (point2), (point3)] - the control points, in pixel space
# self.p0, self.p1, self.p2, self.p3 - the control points, in math space
# the polynomials for x and y, their derivatives, and their second derivatives:
# self.x, self.y
# self.dxdt, self.dydt
# self.ddx, self.ddy
def __init__(self, xaxis, yaxis, path_info, tolerance = dict()):
datalayer.Function.__init__(self, xaxis, yaxis, path_info, tolerance)
self.set_default_tolerance('imag_threshold', 1e-5) # threshold for determining real / complex number
self.set_default_tolerance('t_threshold', 0.002) # threshold for t values
# self.set_default_tolerance('straight_line', 100) # threshold for straight lines
def create(self):
self.x = np.array([-1, 3, -3, 1]) * self.p0[0] + np.array([3, -6, 3, 0]) * self.p1[0] + np.array([-3, 3, 0, 0]) * self.p2[0] + np.array([1, 0, 0, 0]) * self.p3[0]
self.y = np.array([-1, 3, -3, 1]) * self.p0[1] + np.array([3, -6, 3, 0]) * self.p1[1] + np.array([-3, 3, 0, 0]) * self.p2[1] + np.array([1, 0, 0, 0]) * self.p3[1]
self.dxdt = np.array([1, -2, 1]) * 3 * (self.p1[0] - self.p0[0]) + np.array([-1, 1, 0]) * 6 * (self.p2[0]-self.p1[0]) + np.array([1, 0, 0]) * 3 * (self.p3[0] - self.p2[0])
self.dydt = np.array([1, -2, 1]) * 3 * (self.p1[1] - self.p0[1]) + np.array([-1, 1, 0]) * 6 * (self.p2[1]-self.p1[1]) + np.array([1, 0, 0]) * 3 * (self.p3[1] - self.p2[1])
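# With the coefficient vectors stored highest-degree first, evaluating the
# curve reduces to numpy polyval calls. A helper sketch (an illustration with
# hypothetical method names, not recovered source):
def point_at(self, t):
    # curve point (x(t), y(t)) for parameter t in [0, 1]
    return (np.polyval(self.x, t), np.polyval(self.y, t))
def tangent_at(self, t):
    # tangent direction (x'(t), y'(t)), useful for slope checks
    return (np.polyval(self.dxdt, t), np.polyval(self.dydt, t))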
from imutils.video import VideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2
import os
from openvino.inference_engine import IECore
#from BeamSearch import BeamEntry, BeamState, applyLM, addBeam, ctcBeamSearch
import enchant
import pygame
from gtts import gTTS
import subprocess
import signal
import pvporcupine
import pyaudio
import struct
import sys
#from porcupine_demo_mic import *
#from word_beam_search import WordBeamSearch
#from inference import Inference #for OCR model
#d = enchant.Dict("en_US")
fpsstr = ""
framecount = 0
time1 = 0
def rotated_Rectangle(img, rotatedRect, color, thickness=1, lineType=cv2.LINE_8, shift=0):
(x, y), (width, height), angle = rotatedRect
pt1_1 = (int(x + width / 2), int(y + height / 2))
pt2_1 = (int(x + width / 2), int(y - height / 2))
pt3_1 = (int(x - width / 2), int(y - height / 2))
pt4_1 = (int(x - width / 2), int(y + height / 2))
# homogeneous rotation about the rectangle centre (x, y); assumes `angle` is in radians
t = np.array([[np.cos(angle), -np.sin(angle), x-x*np.cos(angle)+y*np.sin(angle)],
[np.sin(angle), np.cos(angle), y-x*np.sin(angle)-y*np.cos(angle)],
[0, 0, 1]])
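# The excerpt breaks off here. A plausible remainder (an assumption, not
# recovered source): map the four corners through t and draw the closed outline.
points = []
for pt in (pt1_1, pt2_1, pt3_1, pt4_1):
    px, py, _ = np.dot(t, np.array([pt[0], pt[1], 1]))
    points.append((int(px), int(py)))
pts = np.array(points, np.int32).reshape((-1, 1, 2))
cv2.polylines(img, [pts], True, color, thickness, lineType, shift)
return img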
from __future__ import print_function
import sys
import numpy as np
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba.numpy_support import from_dtype
from numba import types, njit, typeof
from .support import TestCase, CompilationCache, MemoryLeakMixin
def array_dtype(a):
return a.dtype
def use_dtype(a, b):
return a.view(b.dtype)
def array_itemsize(a):
return a.itemsize
def array_shape(a, i):
return a.shape[i]
def array_strides(a, i):
return a.strides[i]
def array_ndim(a):
return a.ndim
def array_size(a):
return a.size
def array_flags_contiguous(a):
return a.flags.contiguous
def array_flags_c_contiguous(a):
return a.flags.c_contiguous
def array_flags_f_contiguous(a):
return a.flags.f_contiguous
def nested_array_itemsize(a):
return a.f.itemsize
def nested_array_shape(a):
return a.f.shape
def nested_array_strides(a):
return a.f.strides
def nested_array_ndim(a):
return a.f.ndim
def nested_array_size(a):
return a.f.size
def size_after_slicing_usecase(buf, i):
sliced = buf[i]
# Make sure size attribute is not lost
return sliced.size
def array_ctypes_data(arr):
return arr.ctypes.data
class TestArrayAttr(MemoryLeakMixin, TestCase):
def setUp(self):
super(TestArrayAttr, self).setUp()
self.ccache = CompilationCache()
self.a = np.arange(10, dtype=np.int32).reshape(2, 5)
def check_unary(self, pyfunc, arr):
cfunc = self.get_cfunc(pyfunc, (typeof(arr),))
expected = pyfunc(arr)
self.assertPreciseEqual(cfunc(arr), expected)
def check_unary_with_arrays(self, pyfunc):
self.check_unary(pyfunc, self.a)
self.check_unary(pyfunc, self.a.T)
self.check_unary(pyfunc, self.a[::2])
# 0-d array
arr = np.array([42]).reshape(())
self.check_unary(pyfunc, arr)
# array with an empty dimension
arr = np.zeros(0)
self.check_unary(pyfunc, arr)
self.check_unary(pyfunc, arr.reshape((1, 0, 2)))
def get_cfunc(self, pyfunc, argspec):
cres = self.ccache.compile(pyfunc, argspec)
return cres.entry_point
def test_shape(self):
pyfunc = array_shape
cfunc = self.get_cfunc(pyfunc, (types.int32[:,:], types.int32))
for i in range(self.a.ndim):
self.assertEqual(pyfunc(self.a, i), cfunc(self.a, i))
def test_strides(self):
pyfunc = array_strides
cfunc = self.get_cfunc(pyfunc, (types.int32[:,:], types.int32))
for i in range(self.a.ndim):
self.assertEqual(pyfunc(self.a, i), cfunc(self.a, i))
def test_ndim(self):
self.check_unary_with_arrays(array_ndim)
def test_size(self):
self.check_unary_with_arrays(array_size)
def test_itemsize(self):
self.check_unary_with_arrays(array_itemsize)
def test_dtype(self):
pyfunc = array_dtype
self.check_unary(pyfunc, self.a)
dtype = np.dtype([('x', np.int8), ('y', np.int8)])
arr = np.zeros(4, dtype=dtype)
self.check_unary(pyfunc, arr)
def test_use_dtype(self):
# Test using the dtype attribute inside the Numba function itself
b = np.empty(1, dtype=np.int16)
pyfunc = use_dtype
cfunc = self.get_cfunc(pyfunc, (typeof(self.a), typeof(b)))
expected = pyfunc(self.a, b)
self.assertPreciseEqual(cfunc(self.a, b), expected)
def test_flags_contiguous(self):
self.check_unary_with_arrays(array_flags_contiguous)
def test_flags_c_contiguous(self):
self.check_unary_with_arrays(array_flags_c_contiguous)
def test_flags_f_contiguous(self):
self.check_unary_with_arrays(array_flags_f_contiguous)
class TestNestedArrayAttr(MemoryLeakMixin, unittest.TestCase):
def setUp(self):
super(TestNestedArrayAttr, self).setUp()
dtype = np.dtype([('a', np.int32), ('f', np.int32, (2, 5))])
self.a = np.recarray(1, dtype)[0]
self.nbrecord = from_dtype(self.a.dtype)
def get_cfunc(self, pyfunc):
cres = compile_isolated(pyfunc, (self.nbrecord,))
return cres.entry_point
def test_shape(self):
pyfunc = nested_array_shape
cfunc = self.get_cfunc(pyfunc)
self.assertEqual(pyfunc(self.a), cfunc(self.a))
def test_strides(self):
pyfunc = nested_array_strides
cfunc = self.get_cfunc(pyfunc)
self.assertEqual(pyfunc(self.a), cfunc(self.a))
def test_ndim(self):
pyfunc = nested_array_ndim
cfunc = self.get_cfunc(pyfunc)
self.assertEqual(pyfunc(self.a), cfunc(self.a))
def test_size(self):
pyfunc = nested_array_size
cfunc = self.get_cfunc(pyfunc)
self.assertEqual(pyfunc(self.a), cfunc(self.a))
def test_itemsize(self):
pyfunc = nested_array_itemsize
cfunc = self.get_cfunc(pyfunc)
self.assertEqual(pyfunc(self.a), cfunc(self.a))
class TestSlicedArrayAttr(MemoryLeakMixin, unittest.TestCase):
def test_size_after_slicing(self):
pyfunc = size_after_slicing_usecase
cfunc = njit(pyfunc)
arr = np.arange(2 * 5).reshape(2, 5)
for i in range(arr.shape[0]):
self.assertEqual(pyfunc(arr, i), cfunc(arr, i))
arr = np.arange(2 * 5 * 3).reshape(2, 5, 3)
for i in range(arr.shape[0]):
self.assertEqual(pyfunc(arr, i), cfunc(arr, i))
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
world = World()
# world characteristics
world.dim_c = 2
num_agents = 3
world.num_agents = num_agents
num_landmarks = num_agents + 1
# adding agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = False
agent.silent = True
agent.size = 0.05
# adding landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
landmark.size = 0.07
# Initial Conditions
self.reset_world(world)
return world
def reset_world(self, world):
# Landmarks characteristics
for landmark in world.landmarks:
landmark.color = np.array([0.15, 0.15, 0.15])
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
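# The excerpt breaks off here. A plausible remainder (an assumption, following
# the usual multiagent-particle-envs scenario pattern): random agent starts.
for agent in world.agents:
    agent.color = np.array([0.35, 0.35, 0.85])
    agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
    agent.state.p_vel = np.zeros(world.dim_p)
    agent.state.c = np.zeros(world.dim_c)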
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# etips
#
# Copyright (c) Siemens AG, 2020
# Authors:
# <NAME> <<EMAIL>>
# License-Identifier: MIT
from pathlib import Path
from joblib import dump
import numpy as np
from sklearn.model_selection import KFold
from sklearn.dummy import DummyClassifier
from utils import fix_random_seed, load_counting_data, load_mnist_data
if __name__ == '__main__':
fix_random_seed(0)
data_fp = Path('../data/')
exp_name = 'RD1' # or RD2
cv_index = 0 # 0-4
exp_fp = Path(f'./Exps/{exp_name}/CV{cv_index}/')
exp_fp.mkdir(parents=True, exist_ok=True)
x, y = load_counting_data(fp=data_fp, fn='Dataset_10k.pickle')
# x, y = load_mnist_data()
y = np.argmax(y, axis=1)
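# The excerpt breaks off here. A plausible continuation (an assumption): select
# the cv_index-th of five folds and fit the majority-class baseline, using only
# names already imported above.
kf = KFold(n_splits=5, shuffle=True, random_state=0)
train_idx, test_idx = list(kf.split(x))[cv_index]
clf = DummyClassifier(strategy='most_frequent')
clf.fit(x[train_idx], y[train_idx])
print('baseline accuracy:', clf.score(x[test_idx], y[test_idx]))
dump(clf, exp_fp / 'dummy_baseline.joblib')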
import pandas as pd
import numpy as np
def range_groups(df, number_headers, bins=None):
max_bins = 20
df_desc = df[number_headers].describe().reset_index()
df_nums = df[number_headers]
df_nums = df_nums.dropna()
if not bins:
lowest_min_header = None
lowest_min_value = None
highest_max_header = None
highest_max_value = None
for number_header in number_headers:
min_val = df_desc.loc[df_desc['index'] == 'min'][number_header].values[0]
max_val = df_desc.loc[df_desc['index'] == 'max'][number_header].values[0]
if lowest_min_value is None or min_val < lowest_min_value:
lowest_min_header = number_header
lowest_min_value = min_val
if highest_max_value is None or max_val > highest_max_value:
highest_max_header = number_header
highest_max_value = max_val
high_low = np.concatenate([df_nums[lowest_min_header].values, df_nums[highest_max_header].values])
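# The excerpt breaks off here. A plausible next step (an assumption): derive a
# shared set of bin edges from the combined extremes, capped at max_bins.
bins = np.histogram_bin_edges(high_low, bins=max_bins)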
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
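# Usage sketch (illustrative; separate from the generated tables below). Groups
# are looked up by number or Hermann-Mauguin symbol; each symmetry operation
# contributes one equivalent reflection and a phase factor
# exp(-2j*pi*dot(t, hkl)) for its translation t:
#     sg = space_groups[19]              # same object as space_groups['P 21 21 21']
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     assert len(hkls) == len(sg)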
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
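# Note: for F-centred groups such as 'F m m 2' and 'F d d 2' above, the
# generator replicates each point operation with the four face-centring
# vectors (0,0,0), (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), which is
# why these groups carry 16 (rot, trans_num, trans_den) triples instead
# of 4.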
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
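# Note: translations are stored exactly as generated, without reduction
# modulo the lattice. In 'I b a 2' above, adding the I-centring vector
# (1/2,1/2,1/2) to the glide translation (0,0,1/2) gives the
# (1/2,1/2,1) triples, which are equivalent to (1/2,1/2,0) modulo a
# lattice translation.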
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
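# Note: the ':2' suffix on symbols such as 'P n n n :2' marks origin
# choice 2 of the International Tables settings (origin at a centre of
# inversion), and each group is registered under both its number and
# its (possibly suffixed) Hermann-Mauguin symbol.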
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
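# Space group 72: 'I b a m' (body-centred orthorhombic).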
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
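# Space group 73: 'I b c a' (body-centred orthorhombic).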
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
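# Space group 74: 'I m m a' (body-centred orthorhombic).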
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
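# Space group 75: 'P 4' -- first of the tetragonal groups (Nos. 75-142).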
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
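# Space group 76: 'P 41' (tetragonal, 4_1 screw axis).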
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
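# Space group 77: 'P 42' (tetragonal, 4_2 screw axis).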
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
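# Space group 78: 'P 43' (tetragonal, 4_3 screw axis).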
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
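# Space group 79: 'I 4' (body-centred tetragonal).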
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
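# Space group 80: 'I 41' (body-centred tetragonal).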
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
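# Space group 81: 'P -4' (tetragonal, -4 rotoinversion axis).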
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
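# Space group 82: 'I -4' (body-centred tetragonal).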
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
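# Space group 83: 'P 4/m'.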
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
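# Space group 84: 'P 42/m'.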
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
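# Space group 85: 'P 4/n :2' (origin choice 2).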
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
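# Space group 86: 'P 42/n :2' (origin choice 2).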
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
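# Space group 87: 'I 4/m' (body-centred tetragonal).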
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
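# Space group 88: 'I 41/a :2' (body-centred tetragonal, origin choice 2).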
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
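# Space group 89: 'P 4 2 2'.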
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
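# Space group 90: 'P 4 21 2'.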
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
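# Space group 91: 'P 41 2 2'.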
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
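# Space group 92: 'P 41 21 2'.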
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
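# Space group 93: 'P 42 2 2'.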
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
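# Space group 94: 'P 42 21 2'.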
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
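# Space group 95: 'P 43 2 2'.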
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
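# Space group 96: 'P 43 21 2'.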
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
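# Space group 97: 'I 4 2 2' (body-centred tetragonal).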
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
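# Space group 98: 'I 41 2 2' (body-centred tetragonal).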
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
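# Space group 99: 'P 4 m m'.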
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
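# Space group 100: 'P 4 b m'.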
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
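# Space group 101: 'P 42 c m'.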
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
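# Space group 102: 'P 42 n m'.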
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
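# Space group 103: 'P 4 c c'.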
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
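# Space group 104: 'P 4 n c'.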
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
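# Space group 105: 'P 42 m c'.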
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
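# Space group 106: 'P 42 b c'.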
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
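# Body-centred (I) lattices: from space group 107 ('I 4 m m') onward in this
# block, every operation of the primitive set is repeated with the centring
# translation (1/2, 1/2, 1/2) added. The sums are stored unreduced, so
# components such as 1 or 5/4 appear below. A minimal sketch for mapping a
# stored translation back into [0, 1), under the same assumptions as
# `apply_symmetry_op` above:
def reduce_translation(trans_num, trans_den):
    # componentwise num/den modulo 1, e.g. (1/2, 1/2, 1) -> (1/2, 1/2, 0)
    return (trans_num * 1.0 / trans_den) % 1.0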
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
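# Space groups 109 ('I 41 m d') and 110 ('I 41 c d') are built on a 41 screw
# axis: their fourfold rotations carry the translation (1/2, 0, 3/4) in this
# setting.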
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
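# Space groups 111-122 belong to the -4 (rotoinversion) classes: the fourfold
# operations combine a 90-degree rotation about z with z -> -z, visible as the
# trailing -1 in those rotation matrices.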
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
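# From space group 123 ('P 4/m m m') onward the groups are centrosymmetric:
# the second half of each operation list is the first half composed with the
# inversion (rot = -identity, listed first in that half), giving 16 operations
# per primitive group.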
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
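# The ':2' suffix (e.g. 'P 4/n b m :2') marks the origin choice 2 setting of
# the International Tables. Negative translation components such as -1/2 are
# again stored unreduced; `reduce_translation` above maps them into [0, 1).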
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
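# Body centring: the sixteen operations above are repeated below with an
# additional (1/2,1/2,1/2) translation (the 'I' in 'I 4/m m m').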
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
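# Body centring: the sixteen operations above recur below with an added
# (1/2,1/2,1/2) translation (the 'I' in 'I 4/m c m'); some of the resulting
# translation vectors are left unreduced (e.g. a z-component of 1).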
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
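# Body centring: the sixteen operations above recur below with an added
# (1/2,1/2,1/2) translation (the 'I' in 'I 41/a m d').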
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
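# Body centring: the sixteen operations above recur below with an added
# (1/2,1/2,1/2) translation (the 'I' in 'I 41/a c d').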
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
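# Trigonal space groups (Nos. 143-167) begin here.  In the hexagonal setting
# the threefold rotation appears as the matrix [0,-1,0, 1,-1,0, 0,0,1],
# i.e. (x,y,z) -> (-y, x-y, z).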
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
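# Usage sketch: each group is registered under both its International Tables
# number and its Hermann-Mauguin symbol, so the two lookups below return the
# same SpaceGroup object.
#     space_groups[143] is space_groups['P 3']   # -> True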
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
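# Rhombohedral centring (hexagonal setting, the ':H' in 'R 3 :H'): the three
# operations above recur below with added translations (1/3,2/3,2/3) and
# (2/3,1/3,1/3).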
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
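# Rhombohedral centring (hexagonal setting): the six operations above recur
# below with added translations (1/3,2/3,2/3) and (2/3,1/3,1/3).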
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
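# Nos. 149 ('P 3 1 2') and 150 ('P 3 2 1') share the same threefold rotations
# and differ only in the orientation of their twofold axes relative to the
# hexagonal cell, which is why the twofold matrices below differ.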
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
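# Nos. 151-154 combine the 31 and 32 screw axes (visible as the z-translations
# of 1/3 and 2/3 below) with the two twofold-axis settings of Nos. 149 and 150.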
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
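# Usage note (illustrative): every group is registered under both its
# International Tables number and its Hermann-Mauguin symbol, so
# space_groups[155] and space_groups['R 3 2 :H'] return the same object.
# For rhombohedral groups in the hexagonal setting (':H'), the six point
# operations are repeated with the R-centring translations (1/3,2/3,2/3)
# and (2/3,1/3,1/3), giving 18 operations in total.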
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
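# Note: P 3 m 1 (156) and P 3 1 m (157) contain the same three rotations
# and differ only in which of the two inequivalent sets of mirror matrices
# is used ([-1,1,0,0,1,0,...]-type for 156, [0,1,0,1,0,0,...]-type for 157).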
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
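# Note: in R 3 c :H the c-glide combines with the R centring, which is why
# translation numerators such as [1,2,7] over [3,3,6] (= 1/3, 2/3, 7/6)
# appear above; translations are defined modulo full lattice vectors.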
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
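# Note: some numerators are negative (e.g. [0,0,-1] over [1,1,2], i.e.
# -c/2); modulo a full lattice translation this is the same operation
# as +c/2.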
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
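# Note (verifiable from the matrices above): the 6+ rotation in hexagonal
# axes is [[1,-1,0],[1,0,0],[0,0,1]], and the six rotations of P 6 are
# exactly its powers k = 0..5, although the table does not list them in
# power order; e.g. squaring the 6+ matrix gives the 3+ matrix
# [0,-1,0,1,-1,0,0,0,1] listed above.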
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
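# Note: P 61 (169) and P 65 (170) form an enantiomorphic pair; their
# operation lists differ only in the screw translations (c/6 vs. 5c/6 for
# the sixfold screw, c/3 vs. 2c/3 for the threefold components).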
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
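# Note: P 6/m is centrosymmetric; the last six operations above are the
# first six composed with the inversion [-1,0,0,0,-1,0,0,0,-1], so their
# rotation matrices are the negatives of the first six.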
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
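# Note: P 6 2 2 extends the six rotations of P 6 with six twofold rotations
# about axes perpendicular to c (the operations above whose matrices have
# -1 in the lower-right entry), all with zero translation.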
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
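# Illustrative sketch (not part of the generated table): each tuple in
# `transformations` encodes the symmetry operation x' = rot . x + trans_num/trans_den
# on fractional coordinates, with `N` being this module's numpy alias.
# A hypothetical helper applying one operation:
def _apply_symmetry_op(op, frac_coords):
    rot, trans_num, trans_den = op
    return N.dot(rot, frac_coords) + trans_num.astype(float) / trans_den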
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
# example explained in Ansmann paper
import time
startTstartup = time.process_time()
import sys
import numpy as np
from scipy.sparse import csr_matrix
from symengine import sin
from numpy import zeros
import os
endTstartup = time.process_time()
# Parse command line args
systemid = int(sys.argv[1])
tend = float(sys.argv[2])
atol = float(sys.argv[3])
rtol = float(sys.argv[4])
integrator = "dopri5"
# Time loading data
startTload = time.process_time()
import sqlite3
db=os.environ['KURABENCH_DB']
# Load data
## First read the system info, inc. # of edges for allocating arrays
conn = sqlite3.connect(db)
cursor = conn.execute(f'SELECT name, nodes, edges, coupling_constant \
from systems where id={systemid}')
name,nodes,edges,couplingConstant = cursor.fetchone()
## Now read the connectivity
sources = np.empty(edges,dtype=int)
dests = np.empty(edges,dtype=int)
data = np.ones(edges, dtype=bool)
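# Hypothetical continuation (the script is truncated here); the edge-table
# and column names below are assumptions, not from the source.
cursor = conn.execute(f'SELECT source, dest FROM edges WHERE system={systemid}')
for i, (s, d) in enumerate(cursor):
    sources[i], dests[i] = s, d
adjacency = csr_matrix((data, (sources, dests)), shape=(nodes, nodes))
endTload = time.process_time()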
from app import db
from carculator import *
import json
import itertools
import csv
from app.models import Task
import numpy as np
class Calculation:
def __init__(self):
bs = BackgroundSystemModel()
self.electricity_mix = bs.electricity_mix
self.biogasoline = bs.biogasoline
self.biodiesel = bs.biodiesel
self.biomethane = bs.biomethane
self.region_map = bs.region_map
self.cip = CarInputParameters()
self.cip.static()
self.d_categories = {
self.cip.metadata[a]["name"]: self.cip.metadata[a]["category"]
for a in self.cip.metadata
}
self.dcts, self.arr = fill_xarray_from_input_parameters(self.cip)
self.d_pt_en = {
"Petrol": "ICEV-p",
"Diesel": "ICEV-d",
"CNG": "ICEV-g",
"Electric": "BEV",
"Fuel cell": "FCEV",
"Hybrid-petrol": "HEV-p",
"Hybrid-diesel": "HEV-d",
"(Plugin) Hybrid-petrol": "PHEV-p",
"(Plugin) Hybrid-diesel": "PHEV-d",
}
self.d_pt_it = {
"Benzina": "ICEV-p",
"Diesel": "ICEV-d",
"Gas compresso": "ICEV-g",
"Elettrica": "BEV",
"Cella a combustibile": "FCEV",
"Ibrido benzina": "HEV-p",
"Ibrido diesel": "HEV-d",
"Ibrido-benzina (Plugin)": "PHEV-p",
"Ibrido-diesel (Plugin)": "PHEV-d",
}
self.d_pt_de = {
"Benzin": "ICEV-p",
"Diesel": "ICEV-d",
"Komprimiertes Gas": "ICEV-g",
"Elektrisch": "BEV",
"Brennstoffzelle": "FCEV",
"Hybrid-Benzin": "HEV-p",
"Hybrid-Diesel": "HEV-d",
"(Plugin) Hybrid-Benzin": "PHEV-p",
"(Plugin) Hybrid-Diesel": "PHEV-d",
}
self.d_pt_fr = {
"Essence": "ICEV-p",
"Diesel": "ICEV-d",
"Gaz comprimé": "ICEV-g",
"Electrique": "BEV",
"Pile à combustible": "FCEV",
"Hybride-essence": "HEV-p",
"Hybride-diesel": "HEV-d",
"Hybride-essence rechargeable": "PHEV-p",
"Hybride-diesel rechargeable": "PHEV-d",
}
self.d_pt_all = {
"Petrol": "ICEV-p",
"Diesel": "ICEV-d",
"CNG": "ICEV-g",
"Electric": "BEV",
"Fuel cell": "FCEV",
"Hybrid-petrol": "HEV-p",
"Hybrid-diesel": "HEV-d",
"(Plugin) Hybrid-petrol": "PHEV-p",
"(Plugin) Hybrid-diesel": "PHEV-d",
"Benzina": "ICEV-p",
"Gas compresso": "ICEV-g",
"Elettrica": "BEV",
"Cella a combustibile": "FCEV",
"Ibrido benzina": "HEV-p",
"Ibrido diesel": "HEV-d",
"Ibrido-benzina (Plugin)": "PHEV-p",
"Ibrido-diesel (Plugin)": "PHEV-d",
"Benzin": "ICEV-p",
"Komprimiertes Gas": "ICEV-g",
"Elektrisch": "BEV",
"Brennstoffzelle": "FCEV",
"Hybrid-Benzin": "HEV-p",
"Hybrid-Diesel": "HEV-d",
"(Plugin) Hybrid-Benzin": "PHEV-p",
"(Plugin) Hybrid-Diesel": "PHEV-d",
"Essence": "ICEV-p",
"Gaz comprimé": "ICEV-g",
"Electrique": "BEV",
"Pile à combustible": "FCEV",
"Hybride-essence": "HEV-p",
"Hybride-diesel": "HEV-d",
"Hybride-essence rechargeable": "PHEV-p",
"Hybride-diesel rechargeable": "PHEV-d",
}
self.d_size_en = {
"Minicompact": "Mini",
"Subcompact": "Small",
"Compact": "Lower medium",
"Mid-size": "Medium",
"Large": "Large",
"SUV": "SUV",
"Van": "Van",
}
self.d_size_fr = {
"Mini-citadine": "Mini",
"Citadine": "Small",
"Berline compacte": "Lower medium",
"Berline familiale": "Medium",
"Grande routière": "Large",
"SUV": "SUV",
"Van": "Van",
}
self.d_size_it = {
"Mini citycar": "Mini",
"Citycar": "Small",
"Berlina compatta": "Lower medium",
"Berlina medio-grande": "Medium",
"Berlina tre volumi": "Large",
"SUV": "SUV",
"Van": "Van",
}
self.d_size_de = {
"Kleinstwagen": "Mini",
"Kleinwagen": "Small",
"Kompaktklasse": "Lower medium",
"Mittelklasse": "Medium",
"Oberklasse": "Large",
"Geländewagen": "SUV",
"Van": "Van",
}
self.d_size_all = {
"Minicompact": "Mini",
"Subcompact": "Small",
"Compact": "Lower medium",
"Mid-size": "Medium",
"Large": "Large",
"SUV": "SUV",
"Van": "Van",
"Mini-citadine": "Mini",
"Citadine": "Small",
"Berline compacte": "Lower medium",
"Berline familiale": "Medium",
"Grande routière": "Large",
"Mini citycar": "Mini",
"Citycar": "Small",
"Berlina compatta": "Lower medium",
"Berlina medio-grande": "Medium",
"Berlina tre volumi": "Large",
"Kleinstwagen": "Mini",
"Kleinwagen": "Small",
"Kompaktklasse": "Lower medium",
"Mittelklasse": "Medium",
"Oberklasse": "Large",
"Geländewagen": "SUV",
}
        self.d_rev_pt_en = {v: k for k, v in self.d_pt_en.items()}
        self.d_rev_pt_fr = {v: k for k, v in self.d_pt_fr.items()}
        self.d_rev_pt_it = {v: k for k, v in self.d_pt_it.items()}
        self.d_rev_pt_de = {v: k for k, v in self.d_pt_de.items()}
        self.d_rev_size_en = {v: k for k, v in self.d_size_en.items()}
        self.d_rev_size_fr = {v: k for k, v in self.d_size_fr.items()}
        self.d_rev_size_it = {v: k for k, v in self.d_size_it.items()}
        self.d_rev_size_de = {v: k for k, v in self.d_size_de.items()}
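        # Example round-trip: self.d_pt_fr["Essence"] == "ICEV-p" and
        # self.d_rev_pt_fr["ICEV-p"] == "Essence", i.e. the reversed dicts map
        # carculator powertrain/size codes back to the localized UI labels.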
self.excel = ""
def load_map_file(self, lang):
with open("data/car_to_class_map.csv", "r", encoding="ISO-8859-1") as f:
data = [list(line) for line in csv.reader(f, delimiter=";")]
if lang == "en":
for d in data:
d[4] = self.d_rev_pt_en[d[4]]
d[5] = self.d_rev_size_en[d[5]]
if lang == "fr":
for d in data:
d[4] = self.d_rev_pt_fr[d[4]]
d[5] = self.d_rev_size_fr[d[5]]
if lang == "de":
for d in data:
d[4] = self.d_rev_pt_de[d[4]]
d[5] = self.d_rev_size_de[d[5]]
if lang == "it":
for d in data:
d[4] = self.d_rev_pt_it[d[4]]
d[5] = self.d_rev_size_it[d[5]]
return data
def load_params_file(self):
with open("data/parameters definition.txt", "r") as f:
data = [line for line in csv.reader(f, delimiter="\t")]
return data
def interpolate_array(self, years):
return self.arr.interp(year=years, kwargs={"fill_value": "extrapolate"})
def get_dc(self, dc):
return get_standard_driving_cycle(dc)
def create_config_array(self, dict_params, array, mix, energy_storage):
arr = []
year = [int(y) for y in dict_params[("Functional unit",)]["year"]]
driving_cycle = dict_params[("Driving cycle",)]
country = dict_params[("Background",)]["country"]
passengers = dict_params[("Foreground",)][
("Glider", "all", "all", "average passengers", "none")
][(year[0], "loc")]
cargo_mass = dict_params[("Foreground",)][
("Glider", "all", "all", "cargo mass", "none")
][(year[0], "loc")]
lifetime = dict_params[("Foreground",)][
("Driving", "all", "all", "lifetime kilometers", "none")
][(year[0], "loc")]
km_per_year = dict_params[("Foreground",)][
("Driving", "all", "all", "kilometers per year", "none")
][(year[0], "loc")]
for pt in array.coords["powertrain"].values:
for s in array.coords["size"].values:
for y, year in enumerate(array.coords["year"].values.astype(int)):
electricity_mix = mix[y].tolist()
params = [
pt,
s,
int(year),
lifetime,
km_per_year,
passengers,
cargo_mass,
driving_cycle,
country,
electricity_mix,
]
other_params = (
array.sel(
powertrain=pt,
size=s,
year=year,
value=0,
parameter=[
"TtW energy",
"driving mass",
"combustion power",
"electric power",
"range",
"engine efficiency",
"drivetrain efficiency",
"TtW efficiency",
"battery discharge efficiency",
"energy battery mass",
"battery cell energy density",
"electric energy stored",
"battery lifetime kilometers",
],
)
.values.astype(float)
.tolist()
)
params.extend(other_params)
if pt in ("BEV"):
battery_chem = dict_params[("Background",)]["energy storage"][
"electric"
]["type"]
battery_origin = dict_params[("Background",)]["energy storage"][
"electric"
]["origin"]
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = ["", "", "", ""]
else:
battery_chem, battery_origin = ["", ""]
if pt in ("ICEV-p", "PHEV-p", "HEV-p"):
if "fuel blend" in dict_params[("Background",)]:
if "petrol" in dict_params[("Background",)]["fuel blend"]:
primary_fuel_type = dict_params[("Background",)][
"fuel blend"
]["petrol"]["primary fuel"]["type"]
primary_fuel_share = dict_params[("Background",)][
"fuel blend"
]["petrol"]["primary fuel"]["share"][y]
secondary_fuel_type = dict_params[("Background",)][
"fuel blend"
]["petrol"]["secondary fuel"]["type"]
secondary_fuel_share = dict_params[("Background",)][
"fuel blend"
]["petrol"]["secondary fuel"]["share"][y]
else:
if country in self.biogasoline.country.values:
share_biogasoline = np.squeeze(np.clip(
self.biogasoline.sel(
country=country
)
.interp(year=year, kwargs={"fill_value": "extrapolate"})
.values
, 0, 1)).tolist()
else:
share_biogasoline = 0
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = [
"petrol",
1 - share_biogasoline,
"bioethanol - wheat straw",
share_biogasoline,
]
else:
if country in self.biogasoline.country.values:
share_biogasoline = np.squeeze(np.clip(
self.biogasoline.sel(
country=country
)
.interp(year=year, kwargs={"fill_value": "extrapolate"})
.values
, 0, 1)).tolist()
else:
share_biogasoline = 0
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = [
"petrol",
1 - share_biogasoline,
"bioethanol - wheat straw",
share_biogasoline,
]
if pt in ("ICEV-d", "PHEV-d", "HEV-d"):
if "fuel blend" in dict_params[("Background",)]:
if "diesel" in dict_params[("Background",)]["fuel blend"]:
primary_fuel_type = dict_params[("Background",)][
"fuel blend"
]["diesel"]["primary fuel"]["type"]
primary_fuel_share = dict_params[("Background",)][
"fuel blend"
]["diesel"]["primary fuel"]["share"][y]
secondary_fuel_type = dict_params[("Background",)][
"fuel blend"
]["diesel"]["secondary fuel"]["type"]
secondary_fuel_share = dict_params[("Background",)][
"fuel blend"
]["diesel"]["secondary fuel"]["share"][y]
else:
if country in self.biodiesel.country.values:
share_biodiesel = np.squeeze(np.clip(
self.biodiesel.sel(
country=country
)
.interp(year=year, kwargs={"fill_value": "extrapolate"})
.values
, 0, 1)).tolist()
else:
share_biodiesel = 0
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = [
"diesel",
1 - share_biodiesel,
"biodiesel - cooking oil",
share_biodiesel,
]
else:
if country in self.biodiesel.country.values:
share_biodiesel = np.squeeze(np.clip(
self.biodiesel.sel(
country=country
)
.interp(year=year, kwargs={"fill_value": "extrapolate"})
.values
, 0, 1)).tolist()
else:
share_biodiesel = 0
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = [
"diesel",
1 - share_biodiesel,
"biodiesel - cooking oil",
share_biodiesel,
]
if pt in ("ICEV-g"):
if "fuel blend" in dict_params[("Background",)]:
if "cng" in dict_params[("Background",)]["fuel blend"]:
primary_fuel_type = dict_params[("Background",)][
"fuel blend"
]["cng"]["primary fuel"]["type"]
primary_fuel_share = dict_params[("Background",)][
"fuel blend"
]["cng"]["primary fuel"]["share"][y]
secondary_fuel_type = dict_params[("Background",)][
"fuel blend"
]["cng"]["secondary fuel"]["type"]
secondary_fuel_share = dict_params[("Background",)][
"fuel blend"
]["cng"]["secondary fuel"]["share"][y]
else:
if country in self.biomethane.country.values:
share_biomethane = np.squeeze(np.clip(
self.biomethane.sel(
country=country
)
.interp(year=year, kwargs={"fill_value": "extrapolate"})
.values
, 0, 1)).tolist()
else:
share_biomethane = 0
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = [
"cng",
1 - share_biomethane,
"biogas - sewage sludge",
share_biomethane,
]
else:
if country in self.biomethane.country.values:
share_biomethane = np.squeeze(np.clip(
self.biomethane.sel(
country=country
)
.interp(year=year, kwargs={"fill_value": "extrapolate"})
.values
, 0, 1)).tolist()
else:
share_biomethane = 0
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = [
"cng",
1 - share_biomethane,
"biogas - sewage sludge",
share_biomethane,
]
if pt in ("FCEV"):
if "fuel blend" in dict_params[("Background",)]:
if "hydrogen" in dict_params[("Background",)]["fuel blend"]:
primary_fuel_type = dict_params[("Background",)][
"fuel blend"
]["hydrogen"]["primary fuel"]["type"]
primary_fuel_share = dict_params[("Background",)][
"fuel blend"
]["hydrogen"]["primary fuel"]["share"][y]
secondary_fuel_type = dict_params[("Background",)][
"fuel blend"
]["hydrogen"]["secondary fuel"]["type"]
secondary_fuel_share = dict_params[("Background",)][
"fuel blend"
]["hydrogen"]["secondary fuel"]["share"][y]
else:
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = ["electrolysis", 1, "", ""]
else:
(
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
) = ["electrolysis", 1, "", ""]
params.extend(
[
battery_chem,
battery_origin,
primary_fuel_type,
primary_fuel_share,
secondary_fuel_type,
secondary_fuel_share,
]
)
arr.append(params)
return arr
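    # Each row appended to `arr` above reads, in order: powertrain, size, year,
    # lifetime km, km per year, passengers, cargo mass, driving cycle, country,
    # electricity mix, the 13 selected vehicle parameters, then battery
    # chemistry/origin and the primary/secondary fuel types and shares.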
def process_results(self, d, lang, job_id):
""" Calculate LCIA and store results in an array of arrays """
# Update task progress to db
task = Task.query.filter_by(id=job_id).first()
task.progress = 50
db.session.commit()
scope = {
"powertrain": d[("Functional unit",)]["powertrain"],
"size": d[("Functional unit",)]["size"],
}
self.dcts, self.arr = fill_xarray_from_input_parameters(self.cip, scope=scope)
arr = self.interpolate_array(d[("Functional unit",)]["year"])
modify_xarray_from_custom_parameters(d[("Foreground",)], arr)
# remove hybridization for vehicles before 2030
pwt = list({"ICEV-p", "ICEV-d", "ICEV-g"}.intersection(set(scope["powertrain"])))
years_before_2030 = [y for y in arr["year"].values if y < 2030]
if pwt and years_before_2030:
arr.loc[dict(
powertrain=pwt, year=years_before_2030, parameter="combustion power share"
)] = 1
cm = CarModel(arr, cycle=d[("Driving cycle",)])
# adjust the electricity density of the battery cells
for p in d[('Foreground',)]:
if p[3] == "battery cell energy density":
for y in d[("Foreground",)][p]:
cm.array.loc[
dict(
parameter="battery cell energy density",
year=y[0]
)
]= d[("Foreground",)][p][y]
if "electric utility factor" in d[("Background",)]:
uf = list(d[("Background",)]["electric utility factor"].values())
cm.set_all(electric_utility_factor=uf)
else:
cm.set_all()
pt = cm.array.powertrain.values
s = d[("Functional unit",)]["size"]
y = d[("Functional unit",)]["year"]
a = [pt] + [s] + [y]
l = list(itertools.product(*a))
l = [i[0] + " - " + i[1] + " - " + str(i[2]) for i in l]
cumsum = (
cm.energy.sel(
powertrain=pt,
size=s,
year=y,
value=0,
parameter=["motive energy", "auxiliary energy", "recuperated energy"],
)
.cumsum(dim="second")
.sum(dim="parameter")
.transpose("powertrain", "size", "year", "second")
.values.reshape(len(l), -1).astype("float64")
)
# Format the data so that it can be consumed directly
# by nvd3.js
TtW_energy = []
for i, vehicle in enumerate(l):
TtW_energy.append(
{
"key": vehicle,
"values": list(
map(lambda e: {"x": e[0], "y": e[1]}, enumerate(cumsum[i]))
),
}
)
# Functional unit
fu_unit = d[("Functional unit",)]["fu"]["unit"]
fu_qty = float(d[("Functional unit",)]["fu"]["quantity"])
if fu_unit == "vkm":
load_factor = 1
else:
load_factor = cm["average passengers"].mean().values
# Update task progress to db
task = Task.query.filter_by(id=job_id).first()
task.progress = 60
db.session.commit()
scope = {"powertrain": pt, "size": s, "year": y}
total_cost = cm.calculate_cost_impacts(scope=scope).transpose(
"size", "powertrain", "year", "value", "cost_type"
).astype("float64")
cost_benchmark = total_cost.sel(cost_type="total", value=0).values.reshape(
len(l)
)
cost_types = [c for c in total_cost.cost_type.values if c != "total"]
arr_benchmark = list(
map(
lambda x: [
"cost",
x[0].split(" - ")[0],
x[0].split(" - ")[1],
x[0].split(" - ")[2],
1 / x[1],
],
zip(l, cost_benchmark),
)
)
l_scatter = [x.replace(" - ", ", ") for x in l]
dict_scatter = {
x[0]: [x[1]]
for x in zip(l_scatter, cost_benchmark / load_factor * fu_qty)
}
detailed_cost = (
total_cost.sel(value=0, cost_type=cost_types).values.reshape(
len(l), len(cost_types)
)
/ load_factor
* fu_qty
)
a = [pt] + [s] + [y]
l_cost = list(itertools.product(*a))
list_res_costs = list(
map(
lambda x: [
[
"ownership cost",
x[0][1],
x[0][0],
x[0][2],
cost_types[y],
z,
np.sum(x[1]),
]
for y, z in enumerate(x[1])
],
zip(l_cost, detailed_cost),
)
)
list_res_costs = list(itertools.chain.from_iterable(list_res_costs))
self.ic = InventoryCalculation(
cm.array,
scope=d[("Functional unit",)]["fu"],
background_configuration=d[("Background",)],
)
# Update task progress to db
task = Task.query.filter_by(id=job_id).first()
task.progress = 70
db.session.commit()
results = (
self.ic.calculate_impacts()
.sel(value=0)
.transpose("impact_category", "size", "powertrain", "year", "impact")
).astype("float64")
lifetime = int(cm.array.sel(parameter="lifetime kilometers").mean().values)
# Update task progress to db
task = Task.query.filter_by(id=job_id).first()
task.progress = 80
db.session.commit()
impact = results.coords["impact"].values.tolist()
impact_category = results.coords["impact_category"].values
arr_benchmark.extend(
list(
map(
lambda x: [
"climate change",
x[0].split(" - ")[0],
x[0].split(" - ")[1],
x[0].split(" - ")[2],
1 / x[1],
],
zip(
l,
results.sel(impact_category="climate change")
.sum(dim="impact")
.values.reshape(len(l)),
),
)
)
)
arr_benchmark.extend(
list(
map(
lambda x: [
"fossil depletion",
x[0].split(" - ")[0],
x[0].split(" - ")[1],
x[0].split(" - ")[2],
1 / x[1] * 0.755, # 0.755 kg/L gasoline
],
zip(
l,
results.sel(impact_category="fossil depletion")
.sum(dim="impact")
.values.reshape(len(l)),
),
)
)
)
for x in zip(
l_scatter,
results.sel(impact_category="climate change")
.sum(dim="impact")
.values.reshape(len(l))
/ load_factor
* fu_qty,
):
existing_list = dict_scatter[x[0]]
existing_list.append(x[1])
dict_scatter[x[0]] = existing_list
a_wo_impact = [impact_category] + [s] + [pt] + [y]
l_impacts_wo_impact = list(itertools.product(*a_wo_impact))
list_res = list(
map(
lambda x: [
[x[0][0], x[0][1], x[0][2], x[0][3], impact[y], z, np.sum(x[1])]
for y, z in enumerate(x[1])
],
zip(
l_impacts_wo_impact,
(
results.values.reshape(
len(
l_impacts_wo_impact
),
len(impact)
)
/ load_factor
* fu_qty
),
),
)
)
list_res = list(itertools.chain.from_iterable(list_res))
list_res_acc = list(
map(
lambda x: [
x[0][0],
x[0][1],
x[0][2],
x[0][3],
                np.sum(x[1][4:-1]),
import SimpleITK as sitk
import numpy
import OpenEXR
import json
import os
import warnings
from .utils import __TIFF_HEADERS_ID, __get_pixeltype_from_channel, __get_exrpixel_from_channel,\
__change_array_type
from .pixeltype import PixelType
def convert_directory(path, output_pixel_type=None, verbose=True):
"""
Converts directory of EXR files to TIFF.
:param path: path of the directory.
:param output_pixel_type: If equal to None, the output file image will have the same pixel type or format
of that in the input file image. If changing the pixel type is desired, then
output_pixel_type can take the values defined by the fields in the class
exrconverter.pixeltype.PixelType. Example: output_pixel_type=PixelType.FLOAT32.
Since the underlying implementation uses numpy arrays, output_pixel_type can also take
numpy dtypes values, For example, output_pixel_type=numpy.float32.
:param verbose: Boolean variable for deciding whether to print warning messages.
:example: convert_directory(path='path/to/exr', output_pixel_type=numpy.float32, verbose=True)
"""
for filename in os.listdir(path):
if filename[-3:] != 'exr':
continue
if verbose:
print ("Converting: " + filename)
output_filename = path + '/' + filename[:-4] + ".tif"
convert(path + '/' + filename, output_filename, output_pixel_type, verbose)
def convert(input_exr, output_tiff, output_pixel_type=None, verbose=True):
"""
    Converts an input EXR file into a TIFF file. Multiple layers in the input EXR file are created as
    multiple layers in the output TIFF file. The pixels in the output image file can also be set to a
    different type than that of the pixels in the input image file.
:param input_exr: path (string) of the input EXR file.
:param output_tiff: path (string) to the output TIFF file.
:param output_pixel_type: If equal to None, the output file image will have the same pixel type or format
of that in the input file image. If changing the pixel type is desired, then
output_pixel_type can take the values defined by the fields in the class
exrconverter.pixeltype.PixelType. Example: output_pixel_type=PixelType.FLOAT32.
Since the underlying implementation uses numpy arrays, output_pixel_type can also take
numpy dtypes values, For example, output_pixel_type=numpy.float32.
:param verbose: Boolean variable for deciding whether to print warning messages.
:example: convert(input_exr="/path/to/input_exr.exr", output_tiff="/path/to/output_tiff.tiff",
output_pixel_type=numpy.float32, verbose=True)
"""
exr_file = OpenEXR.InputFile(input_exr)
exr_header = exr_file.header()
tiff_headers = json.loads(exr_header[__TIFF_HEADERS_ID])
dw = exr_header['dataWindow']
image_size = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1)
tiff_image = []
for channel_index in sorted(exr_header['channels'].keys(), key=lambda x: float(x)):
channel_type = exr_header['channels'][channel_index]
byte_image = exr_file.channel(str(channel_index), __get_exrpixel_from_channel(channel_type))
pixel_type = __get_pixeltype_from_channel(channel_type)
if output_pixel_type is None:
output_pixel_type = pixel_type
        image_data = numpy.frombuffer(byte_image, dtype=pixel_type)
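        # Hypothetical continuation (the function is truncated here): reshape the
        # flat channel buffer into (height, width) and collect the layer; the
        # __change_array_type signature is an assumption.
        image_data = image_data.reshape((image_size[1], image_size[0]))
        tiff_image.append(__change_array_type(image_data, output_pixel_type))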
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sp
import math
import random as rm
import NumerosGenerados as ng
n = 100000
inicio = 0
ancho = 20
K = 3
numerosGamma = sp.gamma.rvs(size=n, a=K)
print("Media: ", round(np.mean(numerosGamma)))
from pathlib import Path
from numpy import arange, array, ceil, empty, floor, isnan, linspace, \
log10, meshgrid, nan, tile, transpose, where
from numpy.ma import masked_where
from matplotlib.pyplot import clf, close, cm, colorbar, figure, savefig, show
from mpl_toolkits.basemap import Basemap
from os.path import dirname, isdir, join, realpath
from os import mkdir
import pyapex, seaborn
from scipy.interpolate import interp2d#, RectBivariateSpline
#
from pyigrf.pyigrf import GetIGRF
from pyiri2016 import IRI2016
from pyiri2016 import IRI2016Profile
from pyiri2016.iriweb import irisubgl, firisubl
from timeutil import TimeUtilities
#
cwd = Path(__file__).parent
DataFolder = cwd / 'data'
class IRI2016_2DProf(IRI2016Profile):
#def __init__(self):
# pass
#def _GetTitle(self):
# IRI2016Profile()._GetTitle(__self__)
def HeightVsTime(self, FIRI=False, hrlim=[0., 24.], hrstp=1.):
self.option = 1
nhrstp = int((hrlim[1] + hrstp - hrlim[0]) / hrstp) + 1
hrbins = list(map(lambda x: hrlim[0] + float(x) * hrstp, range(nhrstp)))
Ne = empty((nhrstp, self.numstp))
if FIRI: NeFIRI = empty((nhrstp, self.numstp))
Te = empty((nhrstp, self.numstp))
Ti = empty((nhrstp, self.numstp))
for i in range(nhrstp):
self.hour = hrbins[i]
self.HeiProfile()
Ne[i, :] = self.a[0, range(self.numstp)]
if FIRI: NeFIRI[i, :] = self.a[12, range(self.numstp)]
Te[i, :] = self.a[3, range(self.numstp)]
Ti[i, :] = self.a[2, range(self.numstp)]
# self._GetTitle()
altbins = arange(self.vbeg, self.vend + self.vstp, self.vstp)
self.data2D = {'alt' : altbins, 'hour' : hrbins, \
'Ne' : Ne, 'Te' : Te, 'Ti' : Ti, \
'title1' : self.title1, 'title2' : self.title2}
if FIRI:
self.FIRI2D = {'alt' : altbins, 'hour' : hrbins, \
'Ne' : NeFIRI, \
'title1' : self.title1, 'title2' : self.title2}
#
# End of 'HeightVsTime'
#####
def LatVsLon(self, lonlim=[-180., 180.], lonstp=20.):
self.option = 2
nlonstp = int((lonlim[1] + lonstp - lonlim[0]) / lonstp) + 1
lonbins = list(map(lambda x: lonlim[0] + float(x) * lonstp, range(nlonstp)))
NmF2 = empty((nlonstp, self.numstp))
hmF2 = empty((nlonstp, self.numstp))
B0 = empty((nlonstp, self.numstp))
dip = empty((nlonstp, self.numstp))
for i in range(nlonstp):
self.lon = lonbins[i]
self.HeiProfile()
NmF2[i, :] = self.b[0, range(self.numstp)]
hmF2[i, :] = self.b[1, range(self.numstp)]
B0[i, :] = self.b[9, range(self.numstp)]
dip[i, :] = self.b[24, range(self.numstp)]
latbins = arange(self.vbeg, self.vend + self.vstp, self.vstp)
self.data2D = {'lat' : latbins, 'lon' : lonbins, \
'NmF2' : NmF2, 'hmF2' : hmF2, 'B0' : B0, 'dip' : dip, \
'title' : self.title3}
#
# End of 'LatVsLon'
#####
def LatVsFL(self, date=[2003, 11, 21], FIRI=False, IGRF=False, time=[23, 15, 0], \
gc=[-77.76, -11.95], \
hlim=[80., 200.], hstp=1., mlatlim=[-10., 10.], mlatstp=.1):
#
# INPUTS
#
# Date
year, month, day = date
# Time
hour, minute, second = time
# Geog. Coord.
dlon, dlat = gc
# hlim -> Height range at equator, in km
# hstp -> height resolution at equator, in km
# mlatlim -> Geom. latitude range, in degrees
# mlatstp -> Geom. latitude resolution, in degrees
#
###
doy = TimeUtilities().CalcDOY(year, month, day)
        date2 = year + doy / (365 + (1 if TimeUtilities().IsLeapYear else 0))
# f = figure(figsize=(16,6))
# pn = f.add_subplot(111)
self.coordl, self.qdcoordl = [], []
for h in arange(hlim[0], hlim[1] + hstp, hstp):
gc, qc = pyapex.ApexFL().getFL(date=date2, dlon=dlon, dlat=dlat, \
hateq=h, mlatRange=mlatlim, mlatSTP=mlatstp)
# x, y, z = gc['lat'], gc['alt'], gc['lon']
# ind = where(y < hlim[0])
# if len(ind) > 0: x[ind], y[ind], z[ind] = nan, nan, nan
# pn.plot(x, y)
self.coordl.append([gc['lon'], gc['alt'], gc['lat']])
self.qdcoordl.append([qc['lon'], gc['alt'], qc['lat']])
# pn.invert_xaxis()
# show()
jf = IRI2016().Switches()
jmag = 0
mmdd = int(month * 100) + day
hour2 = hour + minute / 60 + second / 3600
self.coordl = array(self.coordl)
self.qdcoordl = array(self.qdcoordl)
# nfl -> No. of field-line (or height)
# nc -> No. of coord. (0 -> lon, 1 -> alt, 2 -> lat)
# np -> No. of points per field-line
nfl, nc, np = self.coordl.shape
self.ne, self.te = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.ti, self.tn = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.nHe, self.nO = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.nN2, self.nO2 = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.nAr, self.nH = tile(nan, (np, nfl)), tile(nan, (np, nfl))
self.nN, self.babs = tile(nan, (np, nfl)), tile(nan, (np, nfl))
if FIRI: self.neFIRI = tile(nan, (np, nfl))
for fl in range(nfl):
curr_coordl = transpose(self.coordl[fl, :, :])
ind = where(curr_coordl[:, 1] >= (hlim[0] - 10.))
if len(ind[0]) > 0:
outf, oarr = irisubgl(jf, jmag, year, mmdd, hour2, \
curr_coordl[ind[0], :], DataFolder)
self.ne[ind[0], fl] = outf[0, :]
self.tn[ind[0], fl] = outf[1, :]
self.ti[ind[0], fl] = outf[2, :]
self.te[ind[0], fl] = outf[3, :]
if FIRI: self.neFIRI[ind[0], fl], ierr = firisubl(year, doy, hour2, \
curr_coordl[ind[0], :], DataFolder)
self.nHe[ind[0], fl] = outf[20, :]
self.nO[ind[0], fl] = outf[21, :]
self.nN2[ind[0], fl] = outf[22, :]
self.nO2[ind[0], fl] = outf[23, :]
self.nAr[ind[0], fl] = outf[24, :]
self.nH[ind[0], fl] = outf[26, :]
self.nN[ind[0], fl] = outf[27, :]
self.babs[ind[0], fl] = list(self.getIGRF(curr_coordl[ind[0], :], date2)) \
if IGRF else outf[19, :]
self.hlim = hlim
self.date, self.time = date, time
self.f107cm = oarr[40, 0]
self.ap, self.Ap = oarr[50, 0], oarr[51, 0]
#
# End of 'LatVsFL'
#####
def _Get_Title(self):
dateStr = 'DATE: {:4d}/{:02d}/{:02d}'.format(self.date[0], self.date[1], self.date[2])
timeStr = 'TIME: {:02d}:{:02d} UT'.format(self.time[0], self.time[1])
f107Str = 'F107: {:6.2f}'.format(self.f107cm)
apStr = 'ap: {:3d}'.format(int(self.ap))
ApStr = 'Ap: {:3d}'.format(int(self.Ap))
gmlon = self.qdcoordl[0, 0, 0]
gmlonStr = '{:7.2f} {:s}'.format(abs(gmlon), 'E' if gmlon > 0. else 'W')
self._title1 = '{:s} - {:s} - MAG. LON.:{:s}'.format(dateStr, timeStr, gmlonStr)
self._title2 = '{:s} - {:s}'.format(f107Str, ApStr)
#
    # End of '_Get_Title'
######
def getIGRF(self, coordl, year):
for lon, alt, lat in coordl:
bn, be, bd, xl, icode = GetIGRF(lat, lon, alt, year)
# Horizontal component
bh = (bn**2 + be**2)**.5
yield bh
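    # Usage sketch (hypothetical): horizontal IGRF field magnitudes along one
    # field line, consumed the same way as in LatVsFL above:
    #   bh = list(self.getIGRF(transpose(self.coordl[fl, :, :]), date2))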
def PlotLatVsFL(self):
self._Get_Title()
nrow, ncol = 2, 2
spID = nrow * 100 + ncol * 10
counter = 0
X, Y = transpose(self.coordl[:, 2, :]), transpose(self.coordl[:, 1, :])
f = figure(figsize=(16, 6))
for ir in range(nrow):
for ic in range(ncol):
pn = f.add_subplot(spID + (counter + 1))
if counter == 0:
Z = log10(self.ne)
vmin, vmax, nc = 8, 12, 32+1
zlabel = 'Log$_{10}$N$_e$(m$^{-3}$)'
elif counter == 1:
Z = log10(self.nHe)
vmin, vmax, nc = 5, 9, 32+1
zlabel = 'Log$_{10}$H$_e$(m$^{-3}$)'
elif counter == 2:
Z = self.te
vmin, vmax, nc = 100, 1200, 36+1
                    zlabel = r'T$_e$($^\circ$)'
elif counter == 3:
Z = self.tn
vmin, vmax, nc = 100, 1200, 36+1
                    zlabel = r'T$_n$($^\circ$)'
Z_masked = masked_where(isnan(Z), Z)
C = linspace(vmin, vmax, nc, endpoint=True)
                ipc = pn.contourf(X, Y, Z_masked, C, cmap=cm.jet, extend='both', origin='lower')
if counter == 0: pn.set_title(self._title1)
if counter == 1: pn.set_title(self._title2)
                if counter > 1: pn.set_xlabel(r'Geog. Lat. ($^\circ$)')
pn.set_ylabel('Altitude (km)')
pn.set_ylim(self.hlim)
pn.invert_xaxis()
pn.grid()
cp = colorbar(ipc)
cp.set_label(zlabel)
counter += 1
show()
#
# End of 'PlotLatVsFL'
#####
def PlotLatVsFLFIRI(self, save=False, verbose=False):
self._Get_Title()
nrow, ncol = 1, 1
spID = nrow * 100 + ncol * 10
counter = 0
        X, Y = transpose(self.coordl[:, 2, :]), transpose(self.coordl[:, 1, :])
import functools
import typing as tp
import gin
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as np
from haiku._src import utils # pylint: disable=no-name-in-module
from huf.module_ops import Linear
from huf.module_ops import dropout as _dropout
from huf.types import Activation
from jax.experimental.sparse.ops import JAXSparse
from spax.linalg.linear_operators import HStacked
def dropout(x, rate: float, is_training: bool):
if isinstance(x, HStacked):
return HStacked(*(dropout(arg, rate, is_training) for arg in x.args))
return _dropout(x, rate, is_training)
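# `dropout` recurses into HStacked operators so each stacked block is dropped
# out independently instead of densifying the stack; plain arrays fall through
# to huf's dropout.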
configurable = functools.partial(gin.configurable, module="grax.hk_utils")
class Renormalize(hk.Module):
def __init__(
self,
create_scale: bool = True,
create_offset: bool = True,
name: tp.Optional[str] = None,
):
super().__init__(name=name)
self.create_scale = create_scale
self.create_offset = create_offset
def __call__(self, x):
assert x.ndim == 2
size = x.shape[-1]
if self.create_scale:
scale = hk.get_parameter(
"scale", shape=(size,), dtype=x.dtype, init=jnp.ones
)
x = x * scale
if self.create_offset:
offset = hk.get_parameter(
"offset", shape=(size,), dtype=x.dtype, init=jnp.zeros
)
x = x + offset
return x
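# Minimal usage sketch (assuming haiku's standard transform workflow; the
# shapes below are illustrative, not from the source):
#   net = hk.transform(lambda x: Renormalize()(x))
#   params = net.init(jax.random.PRNGKey(0), jnp.ones((4, 8)))
#   y = net.apply(params, None, jnp.ones((4, 8)))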
class GatheredBatchNorm(hk.Module):
"""Normalizes inputs to maintain a mean of ~0 and stddev of ~1.
See: https://arxiv.org/abs/1502.03167.
There are many different variations for how users want to manage scale and
offset if they require them at all. These are:
- No scale/offset in which case ``create_*`` should be set to ``False`` and
``scale``/``offset`` aren't passed when the module is called.
- Trainable scale/offset in which case ``create_*`` should be set to
``True`` and again ``scale``/``offset`` aren't passed when the module is
called. In this case this module creates and owns the ``scale``/``offset``
variables.
- Externally generated ``scale``/``offset``, such as for conditional
normalization, in which case ``create_*`` should be set to ``False`` and
then the values fed in at call time.
NOTE: ``jax.vmap(hk.transform(BatchNorm))`` will update summary statistics and
normalize values on a per-batch basis; we currently do *not* support
normalizing across a batch axis introduced by vmap.
"""
def __init__(
self,
create_scale: bool,
create_offset: bool,
decay_rate: float,
eps: float = 1e-5,
scale_init: tp.Optional[hk.initializers.Initializer] = None,
offset_init: tp.Optional[hk.initializers.Initializer] = None,
axis: tp.Optional[tp.Sequence[int]] = None,
cross_replica_axis: tp.Optional[str] = None,
cross_replica_axis_index_groups: tp.Optional[
tp.Sequence[tp.Sequence[int]]
] = None,
data_format: str = "channels_last",
name: tp.Optional[str] = None,
):
"""Constructs a BatchNorm module.
Args:
create_scale: Whether to include a trainable scaling factor.
create_offset: Whether to include a trainable offset.
decay_rate: Decay rate for EMA.
eps: Small epsilon to avoid division by zero variance. Defaults ``1e-5``,
as in the paper and Sonnet.
scale_init: Optional initializer for gain (aka scale). Can only be set
if ``create_scale=True``. By default, ``1``.
offset_init: Optional initializer for bias (aka offset). Can only be set
if ``create_offset=True``. By default, ``0``.
axis: Which axes to reduce over. The default (``None``) signifies that all
but the channel axis should be normalized. Otherwise this is a list of
axis indices which will have normalization statistics calculated.
cross_replica_axis: If not ``None``, it should be a string representing
the axis name over which this module is being run within a ``jax.pmap``.
Supplying this argument means that batch statistics are calculated
across all replicas on that axis.
cross_replica_axis_index_groups: Specifies how devices are grouped.
data_format: The data format of the input. Can be either
``channels_first``, ``channels_last``, ``N...C`` or ``NC...``. By
default it is ``channels_last``.
name: The module name.
"""
super().__init__(name=name)
if not create_scale and scale_init is not None:
raise ValueError("Cannot set `scale_init` if `create_scale=False`")
if not create_offset and offset_init is not None:
raise ValueError("Cannot set `offset_init` if `create_offset=False`")
if cross_replica_axis is None and cross_replica_axis_index_groups is not None:
raise ValueError(
"`cross_replica_axis` name must be specified"
"if `cross_replica_axis_index_groups` are used."
)
self.create_scale = create_scale
self.create_offset = create_offset
self.eps = eps
self.scale_init = scale_init or jnp.ones
self.offset_init = offset_init or jnp.zeros
self.axis = axis
self.cross_replica_axis = cross_replica_axis
self.cross_replica_axis_index_groups = cross_replica_axis_index_groups
self.channel_index = utils.get_channel_index(data_format)
self.mean_ema = hk.ExponentialMovingAverage(decay_rate, name="mean_ema")
self.var_ema = hk.ExponentialMovingAverage(decay_rate, name="var_ema")
def __call__(
self,
inputs: jnp.ndarray,
ids: jnp.ndarray,
is_training: bool,
test_local_stats: bool = False,
scale: tp.Optional[jnp.ndarray] = None,
offset: tp.Optional[jnp.ndarray] = None,
) -> jnp.ndarray:
"""Computes the normalized version of the input.
Args:
inputs: An array, where the data format is ``[..., C]``.
is_training: Whether this is during training.
test_local_stats: Whether local stats are used when is_training=False.
scale: An array up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the scale applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_scale=True``.
offset: An array up to n-D. The shape of this tensor must be broadcastable
to the shape of ``inputs``. This is the offset applied to the normalized
inputs. This cannot be passed in if the module was constructed with
``create_offset=True``.
Returns:
The array, normalized across all but the last dimension.
"""
if self.create_scale and scale is not None:
raise ValueError("Cannot pass `scale` at call time if `create_scale=True`.")
if self.create_offset and offset is not None:
raise ValueError(
"Cannot pass `offset` at call time if `create_offset=True`."
)
channel_index = self.channel_index
if channel_index < 0:
channel_index += inputs.ndim
if self.axis is not None:
axis = self.axis
else:
axis = [i for i in range(inputs.ndim) if i != channel_index]
if is_training or test_local_stats:
mean = jnp.mean(inputs[ids], axis, keepdims=True)
mean_of_squares = jnp.mean(inputs[ids] ** 2, axis, keepdims=True)
if self.cross_replica_axis:
mean = jax.lax.pmean(
mean,
axis_name=self.cross_replica_axis,
axis_index_groups=self.cross_replica_axis_index_groups,
)
mean_of_squares = jax.lax.pmean(
mean_of_squares,
axis_name=self.cross_replica_axis,
axis_index_groups=self.cross_replica_axis_index_groups,
)
var = mean_of_squares - mean ** 2
else:
mean = self.mean_ema.average
var = self.var_ema.average
if is_training:
self.mean_ema(mean)
self.var_ema(var)
w_shape = [1 if i in axis else inputs.shape[i] for i in range(inputs.ndim)]
w_dtype = inputs.dtype
if self.create_scale:
scale = hk.get_parameter("scale", w_shape, w_dtype, self.scale_init)
elif scale is None:
scale = np.ones([], dtype=w_dtype)
if self.create_offset:
offset = hk.get_parameter("offset", w_shape, w_dtype, self.offset_init)
elif offset is None:
            offset = np.zeros([], dtype=w_dtype)
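        # Hypothetical continuation (the module is truncated here), mirroring
        # hk.BatchNorm's final step: normalize with the chosen statistics, then
        # apply scale and offset.
        eps = jax.lax.convert_element_type(self.eps, var.dtype)
        inv = scale * jax.lax.rsqrt(var + eps)
        return (inputs - mean) * inv + offset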
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
# sim resolution
n = 100
# kit parameters
    h = np.linspace(0.03, 0.63, num=20)
w = 0.41
r = w/2
R = 0.92
# initial brightness of perfect diffuse light source in the middle of the top plate
I0 = 1.0
total = []
# loop over kit heights
for j in range(20):
# preallocate the final field
field = np.zeros([2*n, 2*n])
for i in range(4):
# set up grid arrays
if i == 0:
x = np.linspace(-15*r - 0.5*r, 15*r - 0.5*r, 15*n)
y = np.linspace(-15*r - 0.5*r, 15*r - 0.5*r, 15*n)
elif i == 1:
x = np.linspace(-15*r + 0.5*r, 15*r + 0.5*r, 15*n)
y = np.linspace(-15*r - 0.5*r, 15*r - 0.5*r, 15*n)
elif i == 2:
x = np.linspace(-15*r - 0.5*r, 15*r - 0.5*r, 15*n)
y = np.linspace(-15*r + 0.5*r, 15*r + 0.5*r, 15*n)
elif i == 3:
x = np.linspace(-15*r + 0.5*r, 15*r + 0.5*r, 15*n)
y = np.linspace(-15*r + 0.5*r, 15*r + 0.5*r, 15*n)
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
# calculate distance in the ground plane
dist_gp = np.square(xv) + np.square(yv)
# calculate distance to light source
dist = dist_gp + h[j]*h[j]
# calculate light multiplication factor due to angle (Lambertian diffuse reflection)
lamb = np.cos(np.arctan(dist_gp/(h[j]*h[j])))
            # invert to obtain intensities; multiply with the Lambertian factor twice:
            # once for the emission (which is itself a diffuse reflection off a surface)
            # and once more to account for the second diffuse reflection on the white bottom plane
first_order = np.multiply(np.divide(I0*np.ones_like(dist), dist), np.square(lamb))
# calculate second order
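            # Method of images: each mirror wall contributes a flipped copy of
            # the adjacent tile of the first-order field, attenuated by the
            # wall reflectivity R once per bounce.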
second_order = np.zeros([n,n])
second_order += R*np.flip(first_order[6*n:7*n, 7*n:8*n], 0)
second_order += R*np.flip(first_order[8*n:9*n, 7*n:8*n], 0)
second_order += R*np.flip(first_order[7*n:8*n, 6*n:7*n], 1)
second_order += R*np.flip(first_order[7*n:8*n, 8*n:9*n], 1)
# calculate third order
third_order = np.zeros([n,n])
third_order += R*R*first_order[5*n:6*n, 7*n:8*n]
third_order += R*R*first_order[9*n:10*n, 7*n:8*n]
third_order += R*R*first_order[7*n:8*n, 5*n:6*n]
third_order += R*R*first_order[7*n:8*n, 9*n:10*n]
third_order += R*R*np.flip(np.flip(first_order[6*n:7*n, 6*n:7*n], 0), 1)
third_order += R*R*np.flip(np.flip(first_order[8*n:9*n, 6*n:7*n], 0), 1)
third_order += R*R*np.flip(np.flip(first_order[6*n:7*n, 8*n:9*n], 0), 1)
third_order += R*R*np.flip(np.flip(first_order[8*n:9*n, 8*n:9*n], 0), 1)
# calculate fourth order
fourth_order = np.zeros([n,n])
fourth_order += R*R*R*np.flip(first_order[4*n:5*n, 7*n:8*n], 0)
fourth_order += R*R*R*np.flip(first_order[10*n:11*n, 7*n:8*n], 0)
fourth_order += R*R*R*np.flip(first_order[7*n:8*n, 4*n:5*n], 1)
fourth_order += R*R*R*np.flip(first_order[7*n:8*n, 10*n:11*n], 1)
fourth_order += R*R*R*np.flip(first_order[6*n:7*n, 5*n:6*n], 0)
fourth_order += R*R*R*np.flip(first_order[6*n:7*n, 9*n:10*n], 0)
fourth_order += R*R*R*np.flip(first_order[8*n:9*n, 5*n:6*n], 0)
fourth_order += R*R*R*np.flip(first_order[8*n:9*n, 9*n:10*n], 0)
fourth_order += R*R*R*np.flip(first_order[5*n:6*n, 6*n:7*n], 1)
fourth_order += R*R*R*np.flip(first_order[9*n:10*n, 6*n:7*n], 1)
fourth_order += R*R*R*np.flip(first_order[5*n:6*n, 8*n:9*n], 1)
fourth_order += R*R*R*np.flip(first_order[9*n:10*n, 8*n:9*n], 1)
# calculate fifth order
fifth_order = np.zeros([n,n])
fifth_order += R*R*R*R*first_order[3*n:4*n, 7*n:8*n]
fifth_order += R*R*R*R*first_order[11*n:12*n, 7*n:8*n]
fifth_order += R*R*R*R*first_order[7*n:8*n, 3*n:4*n]
fifth_order += R*R*R*R*first_order[7*n:8*n, 11*n:12*n]
fifth_order += R*R*R*R*first_order[5*n:6*n, 5*n:6*n]
fifth_order += R*R*R*R*first_order[5*n:6*n, 9*n:10*n]
fifth_order += R*R*R*R*first_order[9*n:10*n, 5*n:6*n]
fifth_order += R*R*R*R*first_order[9*n:10*n, 9*n:10*n]
fifth_order += R*R*R*R*np.flip(np.flip(first_order[6*n:7*n, 4*n:5*n], 0), 1)
fifth_order += R*R*R*R*np.flip(np.flip(first_order[6*n:7*n, 10*n:11*n], 0), 1)
fifth_order += R*R*R*R*np.flip(np.flip(first_order[8*n:9*n, 4*n:5*n], 0), 1)
fifth_order += R*R*R*R*np.flip(np.flip(first_order[8*n:9*n, 10*n:11*n], 0), 1)
fifth_order += R*R*R*R*np.flip(np.flip(first_order[4*n:5*n, 6*n:7*n], 1), 0)
fifth_order += R*R*R*R*np.flip(np.flip(first_order[10*n:11*n, 6*n:7*n], 1), 0)
fifth_order += R*R*R*R*np.flip(np.flip(first_order[4*n:5*n, 8*n:9*n], 1), 0)
fifth_order += R*R*R*R*np.flip(np.flip(first_order[10*n:11*n, 8*n:9*n], 1), 0)
# calculate sixth order
sixth_order = np.zeros([n,n])
sixth_order += R*R*R*R*R*np.flip(first_order[2*n:3*n, 7*n:8*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[12*n:13*n, 7*n:8*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[7*n:8*n, 2*n:3*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[7*n:8*n, 12*n:13*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[4*n:5*n, 5*n:6*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[4*n:5*n, 9*n:10*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[10*n:11*n, 5*n:6*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[10*n:11*n, 9*n:10*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[5*n:6*n, 4*n:5*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[5*n:6*n, 10*n:11*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[9*n:10*n, 4*n:5*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[9*n:10*n, 10*n:11*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[6*n:7*n, 3*n:4*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[6*n:7*n, 11*n:12*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[8*n:9*n, 3*n:4*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[8*n:9*n, 11*n:12*n], 0)
sixth_order += R*R*R*R*R*np.flip(first_order[3*n:4*n, 6*n:7*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[11*n:12*n, 6*n:7*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[3*n:4*n, 8*n:9*n], 1)
sixth_order += R*R*R*R*R*np.flip(first_order[11*n:12*n, 8*n:9*n], 1)
# calculate seventh order
seventh_order = np.zeros([n,n])
seventh_order += R*R*R*R*R*R*first_order[n:2*n, 7*n:8*n]
seventh_order += R*R*R*R*R*R*first_order[13*n:14*n, 7*n:8*n]
seventh_order += R*R*R*R*R*R*first_order[7*n:8*n, n:2*n]
seventh_order += R*R*R*R*R*R*first_order[7*n:8*n, 13*n:14*n]
seventh_order += R*R*R*R*R*R*np.flip(np.flip(first_order[4*n:5*n, 4*n:5*n], 0), 1)
seventh_order += R*R*R*R*R*R*np.flip(np.flip(first_order[4*n:5*n, 10*n:11*n], 0), 1)
seventh_order += R*R*R*R*R*R*np.flip(np.flip(first_order[10*n:11*n, 4*n:5*n], 0), 1)
seventh_order += R*R*R*R*R*R*np.flip(np.flip(first_order[10*n:11*n, 10*n:11*n], 0), 1)
seventh_order += R*R*R*R*R*R*first_order[5*n:6*n, 3*n:4*n]
seventh_order += R*R*R*R*R*R*first_order[5*n:6*n, 11*n:12*n]
seventh_order += R*R*R*R*R*R*first_order[9*n:10*n, 3*n:4*n]
seventh_order += R*R*R*R*R*R*first_order[9*n:10*n, 11*n:12*n]
seventh_order += R*R*R*R*R*R*first_order[3*n:4*n, 5*n:6*n]
seventh_order += R*R*R*R*R*R*first_order[11*n:12*n, 5*n:6*n]
seventh_order += R*R*R*R*R*R*first_order[3*n:4*n, 9*n:10*n]
seventh_order += R*R*R*R*R*R*first_order[11*n:12*n, 9*n:10*n]
seventh_order += R*R*R*R*R*R*np.flip(first_order[2*n:3*n, 6*n:7*n], 1)
seventh_order += R*R*R*R*R*R*np.flip(first_order[12*n:13*n, 6*n:7*n], 1)
seventh_order += R*R*R*R*R*R*np.flip(first_order[6*n:7*n, 2*n:3*n], 0)
seventh_order += R*R*R*R*R*R*np.flip(first_order[6*n:7*n, 12*n:13*n], 0)
seventh_order += R*R*R*R*R*R*np.flip(first_order[2*n:3*n, 8*n:9*n], 1)
seventh_order += R*R*R*R*R*R*np.flip(first_order[12*n:13*n, 8*n:9*n], 1)
seventh_order += R*R*R*R*R*R*np.flip(first_order[8*n:9*n, 2*n:3*n], 0)
seventh_order += R*R*R*R*R*R*np.flip(first_order[8*n:9*n, 12*n:13*n], 0)
# calculate view on center field
field[n//2:3*n//2, n//2:3*n//2] += first_order[7*n:8*n, 7*n:8*n]
field[n//2:3*n//2, n//2:3*n//2] += second_order
field[n//2:3*n//2, n//2:3*n//2] += third_order
field[n//2:3*n//2, n//2:3*n//2] += fourth_order
field[n//2:3*n//2, n//2:3*n//2] += fifth_order
field[n//2:3*n//2, n//2:3*n//2] += sixth_order
field[n//2:3*n//2, n//2:3*n//2] += seventh_order
# calculate view on reflective surfaces
field[0:n//2, n//2:3*n//2] = R*np.flip(field[n//2:n, n//2:3*n//2], 0)
field[3*n//2:2*n, n//2:3*n//2] = R*np.flip(field[2*n//2:3*n//2, n//2:3*n//2], 0)
field[n//2:3*n//2, 0:n//2] = R*np.flip(field[n//2:3*n//2, n//2:n], 1)
field[n//2:3*n//2, 3*n//2:2*n] = R*np.flip(field[n//2:3*n//2, 2*n//2:3*n//2], 1)
# calculate view on second order reflective surfaces
field[0:n//2, 0:n//2] = R*R*np.flip(np.flip(field[n//2:n, n//2:n], 0), 1)
field[3*n//2:2*n, 0:n//2] = R*R*np.flip(np.flip(field[n:3*n//2, n//2:n], 0), 1)
field[0:n//2, 3*n//2:2*n] = R*R*np.flip(np.flip(field[n//2:n, n:3*n//2], 0), 1)
field[3*n//2:2*n, 3*n//2:2*n] = R*R*np.flip(np.flip(field[n:3*n//2, n:3*n//2], 0), 1)
total.append(np.mean(field[n//2:3*n//2, n//2:3*n//2]))
nir = [58.610050497549246, 58.269173824755065, 57.242368134793125, 55.736464685403192, 54.181837079854098, 52.758503106614597, 51.471398901133192, 50.282084788890359, 49.157540113528867, 48.078595484256851, 47.036377408708567, 46.027325842456158, 45.049853474124298, 44.102778356681569, 43.184833831613084, 42.294635308935071, 41.430765174639284, 40.591847807144219, 39.776589425281259, 38.983791288757537]
plt.figure()
plt.subplot(2,3,1)
plt.imshow(first_order[7*n:8*n, 7*n:8*n])
#plt.clim(0, 5.7)
plt.colorbar()
plt.title("First order")
plt.subplot(2,3,2)
plt.imshow(second_order)
#plt.clim(0, 5.7)
plt.colorbar()
plt.title("Second order")
plt.subplot(2,3,3)
plt.imshow(third_order)
#plt.clim(0, 5.7)
plt.colorbar()
plt.title("Third order")
plt.subplot(2,3,4)
plt.imshow(fourth_order)
#plt.clim(0, 5.7)
plt.colorbar()
plt.title("Fourth order")
plt.subplot(2,3,5)
plt.imshow(fifth_order)
#plt.clim(0, 5.7)
plt.colorbar()
plt.title("Fifth order")
plt.subplot(2,3,6)
plt.imshow(sixth_order)
#plt.clim(0, 5.7)
plt.colorbar()
plt.title("Sixth order")
plt.show(block=False)
#plt.figure()
#plt.imshow(lamb)
#plt.colorbar()
#plt.show(block=False)
plt.figure()
plt.imshow(field)
plt.colorbar()
plt.title("Combined orders")
plt.show(block=False)
    print(np.divide(np.array(nir), np.array(total)))
"""
Module of plotting functions.
Each function creates, and optionally saves, a plot of fields
from a ROMS history file.
INPUT: in_dict: a tuple with information to pass to the plot, such as:
- fn: text string with the full path name of the history file to plot
- fn_out: text string with full path of output file name
- auto_vlims: a boolean governing how color limits are set
- testing: a boolean for testing (e.g. shorter, faster particle tracking)
OUTPUT: either a screen image or a graphics file
"""
import numpy as np
import xarray as xr
import pickle
from datetime import datetime, timedelta
import pandas as pd
from cmocean import cm
from lo_tools import Lfun, zfun, zrfun
from lo_tools import plotting_functions as pfun
import pinfo
from importlib import reload
reload(pfun)
reload(pinfo)
Ldir = Lfun.Lstart()
if '_mac' in Ldir['lo_env']: # mac version
pass
else: # remote linux version
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def P_basic(in_dict):
# START
ds = xr.open_dataset(in_dict['fn'])
# find aspect ratio of the map
aa = pfun.get_aa(ds)
# AR is the aspect ratio of the map: Vertical/Horizontal
AR = (aa[3] - aa[2]) / (np.sin(np.pi*aa[2]/180)*(aa[1] - aa[0]))
fs = 14
hgt = 10
pfun.start_plot(fs=fs, figsize=(int(hgt*2.5/AR),int(hgt)))
fig = plt.figure()
# PLOT CODE
vn_list = ['salt', 'temp']
ii = 1
for vn in vn_list:
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
ax = fig.add_subplot(1, len(vn_list), ii)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict,
cmap=pinfo.cmap_dict[vn], fac=pinfo.fac_dict[vn], vlims_fac=pinfo.range_dict[vn])
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_title('Surface %s %s' % (pinfo.tstr_dict[vn],pinfo.units_dict[vn]), fontsize=1.2*fs)
ax.set_xlabel('Longitude')
if ii == 1:
ax.set_ylabel('Latitude')
pfun.add_info(ax, in_dict['fn'])
#pfun.add_windstress_flower(ax, ds)
pfun.add_bathy_contours(ax, ds, txt=True)
elif ii == 2:
ax.set_yticklabels([])
pfun.add_velocity_vectors(ax, ds, in_dict['fn'])
ii += 1
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_fancy(in_dict):
# START
ds = xr.open_dataset(in_dict['fn'])
# find aspect ratio of the map
aa = pfun.get_aa(ds)
# AR is the aspect ratio of the map: Vertical/Horizontal
AR = (aa[3] - aa[2]) / (np.sin(np.pi*aa[2]/180)*(aa[1] - aa[0]))
fs = 14
hgt = 10
pfun.start_plot(fs=fs, figsize=(int(hgt*2.5/AR),int(hgt)))
fig = plt.figure()
# PLOT CODE
vn_list = ['salt', 'temp']
ii = 1
for vn in vn_list:
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
if vn == 'salt':
cmap = 'jet'
vlims_fac = .5
elif vn == 'temp':
cmap = 'RdYlBu_r'
vlims_fac = 1
ax = fig.add_subplot(1, len(vn_list), ii)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict,
cmap=cmap, fac=pinfo.fac_dict[vn], vlims_fac=vlims_fac)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_title('Surface %s %s' % (pinfo.tstr_dict[vn],pinfo.units_dict[vn]), fontsize=1.2*fs)
ax.set_xlabel('Longitude')
if ii == 1:
ax.set_ylabel('Latitude')
pfun.add_info(ax, in_dict['fn'])
#pfun.add_windstress_flower(ax, ds)
pfun.add_bathy_contours(ax, ds, txt=True)
elif ii == 2:
ax.set_yticklabels([])
pfun.add_velocity_vectors(ax, ds, in_dict['fn'])
ii += 1
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_dive_vort(in_dict):
# START
ds = xr.open_dataset(in_dict['fn'])
# find aspect ratio of the map
aa = pfun.get_aa(ds)
# AR is the aspect ratio of the map: Vertical/Horizontal
AR = (aa[3] - aa[2]) / (np.sin(np.pi*aa[2]/180)*(aa[1] - aa[0]))
fs = 14
hgt = 10
pfun.start_plot(fs=fs, figsize=(int(hgt*2.5/AR),int(hgt)))
fig = plt.figure()
# create fields
u = ds.u[0,-1,:,:].values
v = ds.v[0,-1,:,:].values
dx = 1/ds.pm.values
dy = 1/ds.pn.values
# dive is on the trimmed rho grid
dive = np.diff(u[1:-1,:], axis=1)/dx[1:-1,1:-1] + np.diff(v[:,1:-1],axis=0)/dy[1:-1,1:-1]
# vort is on the psi grid (plot with lon_rho, lat_rho)
vort = np.diff(v,axis=1)/dx[1:,1:] - np.diff(u,axis=0)/dy[1:,1:]
# set color limits
vv = 2*np.nanstd(vort)
# PLOT CODE
if in_dict['auto_vlims']:
pinfo.vlims_dict['vort'] = (-vv, vv)
pinfo.vlims_dict['dive'] = (-vv, vv)
vmin = pinfo.vlims_dict['vort'][0]
vmax = pinfo.vlims_dict['vort'][1]
for ii in [1,2]:
ax = fig.add_subplot(1, 2, ii)
cmap = 'RdYlBu_r'
if ii == 1:
plon, plat = pfun.get_plon_plat(ds.lon_rho[1:-1,1:-1].values, ds.lat_rho[1:-1,1:-1].values)
cs = plt.pcolormesh(plon, plat, dive, cmap=cmap, vmin = vmin, vmax = vmax)
ax.set_title('Surface Divergence $[s^{-1}]$', fontsize=1.2*fs)
elif ii == 2:
cs = plt.pcolormesh(ds.lon_rho.values, ds.lat_rho.values, vort, cmap=cmap, vmin = vmin, vmax = vmax)
ax.set_title('Surface Vorticity $[s^{-1}]$', fontsize=1.2*fs)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(aa)
pfun.dar(ax)
ax.set_xlabel('Longitude')
if ii == 1:
ax.set_ylabel('Latitude')
pfun.add_info(ax, in_dict['fn'])
#pfun.add_windstress_flower(ax, ds)
pfun.add_bathy_contours(ax, ds, txt=True)
elif ii == 2:
pass
#pfun.add_velocity_vectors(ax, ds, in_dict['fn'])
ii += 1
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_dive_vort2(in_dict):
# same as dive_vort but focused on a specific region
# JdF:
aa = [-125, -122.3, 47.8, 48.8]
# START
ds = xr.open_dataset(in_dict['fn'])
# find aspect ratio of the map
# aa = pfun.get_aa(ds)
# AR is the aspect ratio of the map: Vertical/Horizontal
AR = (aa[3] - aa[2]) / (np.sin(np.pi*aa[2]/180)*(aa[1] - aa[0]))
fs = 14
hgt = 6
pfun.start_plot(fs=fs, figsize=(10,10))
fig = plt.figure()
# create fields
u = ds.u[0,-1,:,:].values
v = ds.v[0,-1,:,:].values
dx = 1/ds.pm.values
dy = 1/ds.pn.values
# dive is on the trimmed rho grid
dive = np.diff(u[1:-1,:], axis=1)/dx[1:-1,1:-1] + np.diff(v[:,1:-1],axis=0)/dy[1:-1,1:-1]
# vort is on the psi grid (plot with lon_rho, lat_rho)
vort = np.diff(v,axis=1)/dx[1:,1:] - np.diff(u,axis=0)/dy[1:,1:]
# set color limits
vv = 4*np.nanstd(vort)
# PLOT CODE
if in_dict['auto_vlims']:
pinfo.vlims_dict['vort'] = (-vv, vv)
pinfo.vlims_dict['dive'] = (-vv, vv)
vmin = pinfo.vlims_dict['vort'][0]
vmax = pinfo.vlims_dict['vort'][1]
for ii in [1,2]:
ax = fig.add_subplot(2, 1, ii)
cmap = 'RdYlBu_r'
if ii == 1:
plon, plat = pfun.get_plon_plat(ds.lon_rho[1:-1,1:-1].values, ds.lat_rho[1:-1,1:-1].values)
cs = plt.pcolormesh(plon, plat, dive, cmap=cmap, vmin = vmin, vmax = vmax)
ax.set_title('Surface Divergence $[s^{-1}]$', fontsize=1.2*fs)
elif ii == 2:
cs = plt.pcolormesh(ds.lon_rho.values, ds.lat_rho.values, vort, cmap=cmap, vmin = vmin, vmax = vmax)
ax.set_title('Surface Vorticity $[s^{-1}]$', fontsize=1.2*fs)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(aa)
pfun.dar(ax)
ax.set_ylabel('Latitude')
if ii == 1:
pfun.add_info(ax, in_dict['fn'])
#pfun.add_windstress_flower(ax, ds)
#pfun.add_bathy_contours(ax, ds, txt=True)
elif ii == 2:
ax.set_xlabel('Longitude')
#pfun.add_velocity_vectors(ax, ds, in_dict['fn'])
#fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_ri(in_dict):
"""
Simplified Richardson number
"""
# START
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
xrho = ds['lon_rho'][0,:].values
yrho = ds['lat_rho'][:,0].values
# define box
aa = [-123.25, -122.1, 47, 48.75]
ix0 = zfun.find_nearest_ind(xrho, aa[0])
ix1 = zfun.find_nearest_ind(xrho, aa[1])
iy0 = zfun.find_nearest_ind(yrho, aa[2])
iy1 = zfun.find_nearest_ind(yrho, aa[3])
h = ds.h[iy0:iy1, ix0:ix1].values
rho_bot = ds.rho[0, 0, iy0:iy1, ix0:ix1].values
rho_top = ds.rho[0, -1, iy0:iy1, ix0:ix1].values
drho = rho_bot - rho_top
u = ds.ubar[0, iy0:iy1, ix0-1:ix1].values
v = ds.vbar[0, iy0-1:iy1, ix0:ix1].values
u[np.isnan(u)] = 0
v[np.isnan(v)] = 0
uu = (u[:, 1:] + u[:, :-1])/2
vv = (v[1:, :] + v[:-1, :])/2
spd2 = uu**2 + vv**2
spd2[np.isnan(drho)] = np.nan
spd2[spd2 < .001] = .001 # avoid divide by zero errors
# approximate Richardson number
rho0 = ds.rho0.values
g = 9.8
Ri = g * drho * h / (rho0 * spd2)
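# This is a bulk two-layer Richardson number, Ri = g'h/U^2 with reduced
# gravity g' = g*drho/rho0. We plot 4*Ri below because the Miles-Howard
# criterion permits shear instability roughly where Ri < 1/4, i.e. 4*Ri < 1.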
# psi_grid coordinates
x, y = np.meshgrid(ds.lon_u.values[0,ix0-1:ix1], ds.lat_v.values[iy0-1:iy1,0])
# PLOTTING
plt.close('all')
pfun.start_plot(fs=10, figsize=(18,10))
fig = plt.figure()
xt = [-123.2, -122.2]
yt = [47, 47.5, 48, 48.5]
ax = fig.add_subplot(131)
cs = ax.pcolormesh(x, y, drho, vmin=0, vmax=5, cmap=cm.dense)
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
ax.set_title(r'$\Delta\rho\ [kg\ m^{-3}]$')
ax.set_xticks(xt)
ax.set_yticks(yt)
ax = fig.add_subplot(132)
cs = ax.pcolormesh(x, y, np.sqrt(spd2), vmin=0, vmax=2, cmap=cm.speed)
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
ax.set_title(r'Speed $[m\ s^{-1}]$')
ax.set_xticks(xt)
ax.set_yticks(yt)
ax.set_yticklabels([])
ax = fig.add_subplot(133)
cs = ax.pcolormesh(x, y, 4*Ri, vmin=0, vmax = 2, cmap='RdYlBu')
fig.colorbar(cs, ax=ax)
pfun.dar(ax)
pfun.add_coast(ax)
ax.axis(aa)
ax.set_title(r'$4 \times Ri$')
ax.set_xticks(xt)
ax.set_yticks(yt)
ax.set_yticklabels([])
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_Chl_DO(in_dict):
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(14,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
vn_list = ['phytoplankton', 'oxygen']
fs = 14
ii = 1
for vn in vn_list:
if vn == 'phytoplankton':
slev = -1
stext = 'Surface'
elif vn == 'oxygen':
slev = 0
stext = 'Bottom'
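# ROMS vertical indexing: s-level -1 is the surface layer and
# s-level 0 is the bottom layer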
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
ax = fig.add_subplot(1, len(vn_list), ii)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict, slev=slev,
cmap=pinfo.cmap_dict[vn], fac=pinfo.fac_dict[vn],
vlims_fac=pinfo.range_dict[vn], do_mask_edges=True)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_title('%s %s %s' % (stext, pinfo.tstr_dict[vn],pinfo.units_dict[vn]), fontsize=1.2*fs)
ax.set_xlabel('Longitude')
pfun.add_bathy_contours(ax, ds, txt=True)
if ii == 1:
ax.set_ylabel('Latitude')
pfun.add_info(ax, in_dict['fn'])
pfun.add_windstress_flower(ax, ds)
ii += 1
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_DO_WA_shelf(in_dict):
# Focus on bottom DO on the WA shelf
aa = [-126.1, -123.7, 45.8, 48.8]
xtl = [-126, -125, -124]
ytl = [46, 47, 48]
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(7,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
vn = 'oxygen'
slev = 0
stext = 'Bottom'
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
ax = fig.add_subplot(111)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict, slev=slev,
cmap=pinfo.cmap_dict[vn], fac=pinfo.fac_dict[vn],
vlims_fac=pinfo.range_dict[vn], do_mask_edges=True)
fig.colorbar(cs)
pfun.add_coast(ax)
ax.axis(aa)
pfun.dar(ax)
ax.set_title('%s %s %s' % (stext, pinfo.tstr_dict[vn],pinfo.units_dict[vn]), fontsize=1.2*fs)
ax.set_xlabel('Longitude')
pfun.add_bathy_contours(ax, ds, txt=False)
ax.set_ylabel('Latitude')
ax.set_xticks(xtl)
ax.set_yticks(ytl)
pfun.add_info(ax, in_dict['fn'], loc='upper_right')
# ADD MEAN WINDSTRESS VECTOR
# t_scl: scale for the windstress vector (smaller gives longer arrows)
# t_leglen: Pa value for the wind stress vector legend
pfun.add_windstress_flower(ax, ds, t_scl=0.5, t_leglen=0.1, center=(.85,.65), fs=12)
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_ths(in_dict):
# Plot property-property diagrams, like theta vs. s
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(10,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
# make a potential density field
import seawater as sw
s0 = 25; s1 = 35
th0 = 0; th1 = 20
SS, TH = np.meshgrid(np.linspace(s0, s1, 50), np.linspace(th0, th1, 50))
SIG = sw.dens0(SS, TH) - 1000
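# sw.dens0 returns density at surface pressure, so SIG is sigma-0;
# it is contoured below as background isopycnals for the T-S diagram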
S = zrfun.get_basic_info(in_dict['fn'], only_S=True)
h = ds['h'].values
z = zrfun.get_z(h, 0*h, S, only_rho=True)
s = ds['salt'].values.squeeze()
th = ds['temp'].values.squeeze()
ax = fig.add_subplot(111)
ax.set_xlabel('Salinity')
ax.set_ylabel('Theta (deg C)')
ax.contour(SS, TH, SIG, 20)
nsub = 500
alpha = .1
mask = z > -10
ax.plot(s[mask][::nsub], th[mask][::nsub], '.r', alpha=alpha)
mask = (z < -10) & (z > -200)
ax.plot(s[mask][::nsub], th[mask][::nsub], '.g', alpha=alpha)
mask = z < -200
ax.plot(s[mask][::nsub], th[mask][::nsub], '.b', alpha=alpha)
ax.set_xlim(s0, s1)
ax.set_ylim(th0, th1)
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_debug(in_dict):
# Focused on debugging
vn_list = ['u', 'v', 'zeta']
do_wetdry = False
# START
fs = 10
pfun.start_plot(fs=fs, figsize=(8*len(vn_list),10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
ii = 1
for vn in vn_list:
if 'lon_rho' in ds[vn].coords:
tag = 'rho'
if 'lon_u' in ds[vn].coords:
tag = 'u'
if 'lon_v' in ds[vn].coords:
tag = 'v'
x = ds['lon_'+tag].values
y = ds['lat_'+tag].values
px, py = pfun.get_plon_plat(x,y)
if vn in ['u', 'v']:
v = ds[vn][0,-1,:,:].values
vmin = -2
vmax = 2
cmap='hsv_r'
elif vn == 'zeta':
v = ds[vn][0,:,:].values
h = ds.h.values
mr = ds.mask_rho.values
v[mr==0] = np.nan
h[mr==0] = np.nan
v = v + h
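# zeta + h is the total water column thickness, which makes
# wetting/drying problems easy to spot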
vn = 'depth'
vmin = 2
vmax = 4
cmap='RdYlGn'
else:
v = ds[vn][0, -1,:,:].values
ax = fig.add_subplot(1, len(vn_list), ii)
ax.set_xticks([])
ax.set_yticks([])
cs = ax.pcolormesh(px, py, v, cmap=cmap, vmin=vmin, vmax=vmax)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
if ii == 1:
pfun.add_info(ax, in_dict['fn'], his_num=True)
vmax, vjmax, vimax, vmin, vjmin, vimin = pfun.maxmin(v)
ax.plot(x[vjmax,vimax], y[vjmax,vimax],'*y', mec='k', markersize=15)
ax.plot(x[vjmin,vimin], y[vjmin,vimin],'oy', mec='k', markersize=10)
ax.set_title(('%s ((*)max=%0.1f, (o)min=%0.1f)' % (vn, vmax, vmin)))
ii += 1
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_layer(in_dict):
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(14,10))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
vn_list = ['oxygen', 'temp']
z_level = -250
zfull = pfun.get_zfull(ds, in_dict['fn'], 'rho')
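# zfull is the full 3-D z coordinate on the rho grid; get_laym below
# interpolates each field onto the constant-depth surface z = z_level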
ii = 1
for vn in vn_list:
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = ()
ax = fig.add_subplot(1, len(vn_list), ii)
laym = pfun.get_laym(ds, zfull, ds['mask_rho'][:], vn, z_level)
v_scaled = pinfo.fac_dict[vn]*laym
vlims = pinfo.vlims_dict[vn]
if len(vlims) == 0:
vlims = pfun.auto_lims(v_scaled)
pinfo.vlims_dict[vn] = vlims
cs = ax.pcolormesh(ds['lon_psi'][:], ds['lat_psi'][:], v_scaled[1:-1,1:-1],
vmin=vlims[0], vmax=vlims[1], cmap=pinfo.cmap_dict[vn])
cb = fig.colorbar(cs)
pfun.add_bathy_contours(ax, ds, txt=True)
pfun.add_coast(ax)
ax.axis(pfun.get_aa(ds))
pfun.dar(ax)
ax.set_xlabel('Longitude')
ax.set_title('%s %s on Z = %d (m)' % (pinfo.tstr_dict[vn], pinfo.units_dict[vn], z_level))
if ii == 1:
pfun.add_info(ax, in_dict['fn'])
ax.set_ylabel('Latitude')
pfun.add_windstress_flower(ax, ds)
if ii == 2:
pfun.add_velocity_vectors(ax, ds, in_dict['fn'], zlev=z_level)
ii += 1
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_sect(in_dict):
"""
This plots a map and a section (distance, z), and makes sure
that the color limits are identical. If the color limits are
set automatically then the section is the preferred field for
setting the limits.
I think this works best with -avl False (the default).
"""
# START
fs = 14
pfun.start_plot(fs=fs, figsize=(20,9))
fig = plt.figure()
ds = xr.open_dataset(in_dict['fn'])
# PLOT CODE
vn = 'phytoplankton'
# GET DATA
G, S, T = zrfun.get_basic_info(in_dict['fn'])
# CREATE THE SECTION
# create track by hand
if False:
lon = G['lon_rho']
lat = G['lat_rho']
zdeep = -3500
x = np.linspace(lon.min(), lon.max(), 500)
y = 47 * np.ones(x.shape)
# or read in a section (or list of sections)
else:
tracks_path = Ldir['data'] / 'section_lines'
tracks = ['Line_jdf_v0.p', 'Line_ps_main_v0.p']
zdeep = -300
xx = np.array([])
yy = np.array([])
for track in tracks:
track_fn = tracks_path / track
# get the track to interpolate onto
pdict = pickle.load(open(track_fn, 'rb'))
xx = np.concatenate((xx,pdict['lon_poly']))
yy = np.concatenate((yy,pdict['lat_poly']))
for ii in range(len(xx)-1):
x0 = xx[ii]
x1 = xx[ii+1]
y0 = yy[ii]
y1 = yy[ii+1]
nn = 20
if ii == 0:
x = np.linspace(x0, x1, nn)
y = np.linspace(y0,y1, nn)
else:
x = np.concatenate((x, np.linspace(x0, x1, nn)[1:]))
y = np.concatenate((y, np.linspace(y0, y1, nn)[1:]))
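# each segment after the first drops its starting point ([1:]) so that
# shared endpoints between consecutive segments are not duplicated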
v2, v3, dist, idist0 = pfun.get_section(ds, vn, x, y, in_dict)
# COLOR
# scaled section data
sf = pinfo.fac_dict[vn] * v3['sectvarf']
# now we use the scaled section as the preferred field for setting the
# color limits of both panels when -avl is True
if in_dict['auto_vlims']:
pinfo.vlims_dict[vn] = pfun.auto_lims(sf)
# PLOTTING
# map with section line
ax = fig.add_subplot(1, 3, 1)
cs = pfun.add_map_field(ax, ds, vn, pinfo.vlims_dict,
cmap=pinfo.cmap_dict[vn], fac=pinfo.fac_dict[vn], do_mask_edges=True)
# fig.colorbar(cs, ax=ax) # It is identical to that of the section
pfun.add_coast(ax)
aaf = [-125.5, -122.1, 46.8, 50.3] # focus domain
ax.axis(aaf)
pfun.dar(ax)
pfun.add_info(ax, in_dict['fn'], loc='upper_right')
ax.set_title('Surface %s %s' % (pinfo.tstr_dict[vn],pinfo.units_dict[vn]))
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
# add section track
ax.plot(x, y, '-r', linewidth=2)
ax.plot(x[idist0], y[idist0], 'or', markersize=5, markerfacecolor='w',
markeredgecolor='r', markeredgewidth=2)
ax.set_xticks([-125, -124, -123])
ax.set_yticks([47, 48, 49, 50])
# section
ax = fig.add_subplot(1, 3, (2, 3))
ax.plot(dist, v2['zbot'], '-k', linewidth=2)
ax.plot(dist, v2['zeta'], '-b', linewidth=1)
ax.set_xlim(dist.min(), dist.max())
ax.set_ylim(zdeep, 5)
# plot section
svlims = pinfo.vlims_dict[vn]
cs = ax.pcolormesh(v3['distf'], v3['zrf'], sf,
vmin=svlims[0], vmax=svlims[1], cmap=pinfo.cmap_dict[vn])
fig.colorbar(cs, ax=ax)
ax.set_xlabel('Distance (km)')
ax.set_ylabel('Z (m)')
ax.set_title('Section %s %s' % (pinfo.tstr_dict[vn],pinfo.units_dict[vn]))
fig.tight_layout()
# FINISH
ds.close()
pfun.end_plot()
if len(str(in_dict['fn_out'])) > 0:
plt.savefig(in_dict['fn_out'])
plt.close()
else:
plt.show()
def P_sect_soundspeed(in_dict):
"""
Soundspeed section plot
"""
import gsw
ds = xr.open_dataset(in_dict['fn'])
# create track by hand
x = np.linspace(-124.85,-124.2, 100) # shelf only
#x = np.linspace(-126,-124.2, 100) # shows SOFAR channel
y = 47 * np.ones(x.shape)
import unittest
import numpy as np
from numpy.testing import assert_array_almost_equal
class Test_BoundingBoxes(unittest.TestCase):
def test_bounding_box_stroke(self):
from pen_plots.strokes import bounding_box
stroke = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]])
bbox = bounding_box(stroke)
expected = [[0.0, 0.0], [1.0, 1.0]]
assert_array_almost_equal(bbox, expected)
def test_bounding_box_strokes(self):
from pen_plots.strokes import bounding_box
strokes = [
np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]),
np.array([[1.0, 1.0], [0.0, 1.0], [0.0, 0.0]]),
]
bbox = bounding_box(strokes)
expected = [[0.0, 0.0], [1.0, 1.0]]
assert_array_almost_equal(bbox, expected)
class Test_Concatenation(unittest.TestCase):
def test_concat(self):
from pen_plots.strokes import concat
strokes = [
np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]),
np.array([[1.0, 1.0], [0.0, 1.0], [0.0, 0.0]]),
]
concatenated = concat(strokes)
expected = [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]]
assert_array_almost_equal(concatenated, expected)
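# For reference, a minimal sketch of implementations consistent with the tests
# above (hypothetical -- the actual pen_plots.strokes code may differ):
#
#   def bounding_box(strokes):
#       # accept a single (N, 2) stroke or a list of strokes
#       pts = np.concatenate(strokes) if isinstance(strokes, list) else strokes
#       return np.array([pts.min(axis=0), pts.max(axis=0)])
#
#   def concat(strokes):
#       # join strokes, dropping each duplicated shared endpoint
#       return np.concatenate([strokes[0]] + [s[1:] for s in strokes[1:]])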
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Define the occlusion-colour and segmentation-granularity options to sweep
# (these dicts stand in for the notebook's selection widgets)
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
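# (3 granularities x 5 occlusion colours = 15 settings; each setting
# contributes one entry to EXP_IMG below)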
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# instances to explain (one per class)
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
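# discs_ enumerates every ordered pair of bin edges: C(7,2) = 21 petal-length
# intervals times C(6,2) = 15 petal-width intervals = 315 discretisations,
# matching the 0..314 disc indices in the EXP_TAB keys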
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
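# Expected results: EXP_IMG holds 15 dicts (one per granularity/colour
# setting), each keyed by ImageNet class id with the top-5
# (segment_id, weight) pairs; EXP_TAB maps 'instance&class&disc' keys to
# the expected tabular explanation arrays.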
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&221': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&222': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&223': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&224': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&225': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&226': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&227': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&228': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&229': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&230': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&231': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&232': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&233': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&234': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&235': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&236': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&237': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&238': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&239': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&240': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&241': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&242': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&243': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&244': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&245': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&246': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&247': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&248': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&249': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&250': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&251': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&252': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&253': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&254': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&255': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&256': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&257': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&258': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&259': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&260': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&261': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&262': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&263': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&264': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&265': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&266': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&267': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&268': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&269': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&270': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&271': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&272': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&273': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&274': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&275': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&276': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&277': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&278': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&279': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&280': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&281': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&282': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&283': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&284': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&285': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&286': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&287': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&288': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&289': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&290': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&291': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&292': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&293': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&294': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&295': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&296': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&297': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&298': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&299': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&300': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&301': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&302': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&303': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&304': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&305': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&306': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&307': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&308': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&309': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&310': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&311': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&312': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&313': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&314': np.array([-0.2741128763380603, -0.7260889090887469]),
'versicolor&0&0': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&1': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&2': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&3': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&4': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&5': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&6': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&7': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&8': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&9': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&10': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&11': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&12': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&13': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&14': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&15': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&16': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&17': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&18': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&19': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&20': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&21': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&22': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&23': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&24': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&25': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&26': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&27': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&28': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&29': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&30': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&31': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&32': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&33': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&34': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&35': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&36': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&37': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&38': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&39': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&40': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&41': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&42': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&43': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&44': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&45': np.array([0.05031696218434577, -0.929227611211748]),
'versicolor&0&46': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'versicolor&0&50': np.array([0.017148644765919676, -0.9632117581295891]),
'versicolor&0&51': np.array([0.6614632074748169, -0.6030419328583525]),
'versicolor&0&52': np.array([0.5519595359123358, -0.6434192906054143]),
'versicolor&0&53': np.array([0.14241819268815753, -0.8424615476000691]),
'versicolor&0&54': np.array([0.667423576348749, -0.6594086777766442]),
'versicolor&0&55': np.array([0.5429872243487625, -0.6697888833280774]),
'versicolor&0&56': np.array([0.1140907502997574, -0.8737800276630269]),
'versicolor&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'versicolor&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'versicolor&0&60': np.array([0.029402442458921384, -0.9481684282717414]),
'versicolor&0&61': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'versicolor&0&65': np.array([0.009887859354111524, -0.9698143912008228]),
'versicolor&0&66': np.array([0.42809266524335826, -0.40375108595117376]),
'versicolor&0&67': np.array([0.45547700380103057, -0.6083463409799501]),
'versicolor&0&68': np.array([0.19002455311770447, -0.8848597943731074]),
'versicolor&0&69': np.array([0.436966114193701, -0.4638042290788281]),
'versicolor&0&70': np.array([0.45424510803217066, -0.6425314361631614]),
'versicolor&0&71': np.array([0.1746467870122951, -0.9073062742839755]),
'versicolor&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'versicolor&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'versicolor&0&75': np.array([0.0, -0.95124502153736]),
'versicolor&0&76': np.array([0.0, -0.9708703761803881]),
'versicolor&0&77': np.array([0.0, 0.5659706098422994]),
'versicolor&0&78': np.array([0.0, 0.3962828716108186]),
'versicolor&0&79': np.array([0.0, 0.2538069363248767]),
'versicolor&0&80': np.array([0.0, -0.9708703761803881]),
'versicolor&0&81': np.array([0.0, -0.3631376646911367]),
'versicolor&0&82': np.array([0.0, -0.5804857652839247]),
'versicolor&0&83': np.array([0.0, -0.8943993997517804]),
'versicolor&0&84': np.array([0.0, -0.4231275527222919]),
'versicolor&0&85': np.array([0.0, -0.6164235822373675]),
'versicolor&0&86': np.array([0.0, -0.9166476163222441]),
'versicolor&0&87': np.array([0.0, 0.5659706098422994]),
'versicolor&0&88': np.array([0.0, 0.5659706098422994]),
'versicolor&0&89': np.array([0.0, 0.3962828716108186]),
'versicolor&0&90': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&91': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&92': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&93': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&94': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&95': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&96': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&97': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&98': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&99': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&100': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&101': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&102': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&103': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&104': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&105': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&106': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&107': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&108': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&109': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&110': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&111': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&112': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&113': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&114': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&115': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&116': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&117': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&118': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&119': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&120': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&121': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&122': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&123': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&124': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&125': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&126': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&127': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&128': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&129': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&130': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&131': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&132': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&133': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&134': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&135': np.array([-0.19684482070614498, -0.7845939961595055]),
'versicolor&0&136': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&137': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&138': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&139': np.array([-0.8063011502229427, 0.4134300066735808]),
'versicolor&0&140': np.array([-0.07475231751447156, -0.9062785678426409]),
'versicolor&0&141': np.array([-0.7985789197998611, 0.0026209054759345337]),
'versicolor&0&142': np.array([-0.7182275903095532, -0.11963032135457498]),
'versicolor&0&143': np.array([-0.2798927835773098, -0.6581136857450849]),
'versicolor&0&144': np.array([-0.7920119433269182, -0.0142751249964083]),
'versicolor&0&145': np.array([-0.6943081428778407, -0.14852813120265815]),
'versicolor&0&146': np.array([-0.16106555563262584, -0.777621649099753]),
'versicolor&0&147': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&148': np.array([-0.6782037543706109, 0.2956007367698983]),
'versicolor&0&149': np.array([-0.7694171988675237, 0.276633135028249]),
'versicolor&0&150': np.array([-0.7431524521056113, -0.24432235603856345]),
'versicolor&0&151': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&152': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&153': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&154': np.array([-0.9706534384443797, 0.007448195602953232]),
'versicolor&0&155': np.array([-0.4926091071260067, -0.49260910712601286]),
'versicolor&0&156': np.array([-0.967167257194905, -0.011919414234523772]),
'versicolor&0&157': np.array([-0.953200964337313, -0.027163424176667752]),
'versicolor&0&158': np.array([-0.8486399726113752, -0.13537345771621853]),
'versicolor&0&159': np.array([-0.9658161779555727, -0.01446062269877741]),
'versicolor&0&160': np.array([-0.9493506964095418, -0.0312186903717912]),
'versicolor&0&161': np.array([-0.7870031444780577, -0.1952404625292782]),
'versicolor&0&162': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&163': np.array([-0.9550700362273441, 0.025428672111930138]),
'versicolor&0&164': np.array([-0.9672121512728677, 0.012993005706020341]),
'versicolor&0&165': np.array([-0.19685199412911655, -0.7845879230594393]),
'versicolor&0&166': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&167': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&168': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&169': np.array([-0.7974072911132788, 0.006894018772033604]),
'versicolor&0&170': np.array([-0.07476043598366228, -0.9062715528546994]),
'versicolor&0&171': np.array([-0.7779663027946229, -0.2981599980028888]),
'versicolor&0&172': np.array([-0.6669876551417979, -0.2911996622134135]),
'versicolor&0&173': np.array([-0.3355030348883163, -0.6305271339971502]),
'versicolor&0&174': np.array([-0.7658431164447598, -0.3248317507526541]),
'versicolor&0&175': np.array([-0.6459073168288453, -0.31573292128613833]),
'versicolor&0&176': np.array([-0.2519677855687844, -0.7134447168661863]),
'versicolor&0&177': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&178': np.array([-0.7770298852793476, 0.029443430477147536]),
'versicolor&0&179': np.array([-0.7936433456054744, 0.012583752076496493]),
'versicolor&0&180': np.array([-0.05855179950109871, -0.9211684729232403]),
'versicolor&0&181': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&182': np.array([-0.5775164514598086, 0.6278692602817483]),
'versicolor&0&183': np.array([-0.6813845327458135, 0.6599725404733693]),
'versicolor&0&184': np.array([-0.5182062652425321, 0.3958533237517639]),
'versicolor&0&185': np.array([-0.020067537725011863, -0.960349531159508]),
'versicolor&0&186': np.array([-0.5107107533700952, 0.0075507123577884866]),
'versicolor&0&187': np.array([-0.1464063320531759, -0.4788055402156298]),
'versicolor&0&188': np.array([-0.061109248092233844, -0.8620287767000373]),
'versicolor&0&189': np.array([-0.4706137753079746, -0.057389625790424635]),
'versicolor&0&190': np.array([-0.06804620923037683, -0.5677904519730453]),
'versicolor&0&191': np.array([-0.020216773196675246, -0.9057119888626176]),
'versicolor&0&192': np.array([-0.5775164514598086, 0.6278692602817483]),
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Class for reading and writing uvfits files."""
import os
import copy
import warnings

import numpy as np
from astropy import constants as const
from astropy.time import Time
from astropy.io import fits

from .uvdata import UVData
from .. import utils as uvutils

__all__ = ["UVFITS"]


class UVFITS(UVData):
"""
Defines a uvfits-specific subclass of UVData for reading and writing uvfits.
This class should not be interacted with directly, instead use the read_uvfits
and write_uvfits methods on the UVData class.
Attributes
----------
uvfits_required_extra : list of str
Names of optional UVParameters that are required for uvfits.
"""
uvfits_required_extra = [
"antenna_positions",
"gst0",
"rdate",
"earth_omega",
"dut1",
"timesys",
]

    def _get_parameter_data(
self, vis_hdu, run_check_acceptability, background_lsts=True,
):
"""
        Read just the random parameters portion of the uvfits file ("metadata").

        Separated from full read so that header, metadata and data can be read
        independently.
"""
        # astropy.io fits reader scales date according to the relevant PZERO
# uvfits standard is to have 2 DATE parameters, both floats:
# DATE (full day) and _DATE (fractional day)
# cotter uvfits files have one DATE that is a double
# using data.par('date') is general -- it will add them together if there are 2
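        # (e.g. DATE = 2457000.0 plus _DATE = 0.25 add up to JD 2457000.25;
        # splitting the Julian date this way preserves floating-point precision)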
self.time_array = vis_hdu.data.par("date")
self.Ntimes = len(np.unique(self.time_array))
# check if lst array is saved. It's not a standard metadata item in uvfits,
# but if the file was written with pyuvdata it may be present
# (depending on pyuvdata version)
proc = None
if "LST" in vis_hdu.data.parnames:
# angles in uvfits files are stored in degrees, so convert to radians
self.lst_array = np.deg2rad(vis_hdu.data.par("lst"))
if run_check_acceptability:
(
latitude,
longitude,
altitude,
) = self.telescope_location_lat_lon_alt_degrees
lst_array = uvutils.get_lst_for_time(
self.time_array, latitude, longitude, altitude
)
if not np.all(
np.isclose(
self.lst_array,
lst_array,
rtol=self._lst_array.tols[0],
atol=self._lst_array.tols[1],
)
):
warnings.warn(
"LST values stored in this file are not "
"self-consistent with time_array and telescope "
"location. Consider recomputing with "
"utils.get_lst_for_time."
)
else:
proc = self.set_lsts_from_time_array(background=background_lsts)
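            # (when computed in the background this returns a process handle,
            # which is joined at the end of this method)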
# if antenna arrays are present, use them. otherwise use baseline array
if "ANTENNA1" in vis_hdu.data.parnames and "ANTENNA2" in vis_hdu.data.parnames:
# Note: uvfits antennas are 1 indexed,
# need to subtract one to get to 0-indexed
self.ant_1_array = np.int32(vis_hdu.data.par("ANTENNA1")) - 1
self.ant_2_array = np.int32(vis_hdu.data.par("ANTENNA2")) - 1
subarray = np.int32(vis_hdu.data.par("SUBARRAY")) - 1
# error on files with multiple subarrays
if len(set(subarray)) > 1:
raise ValueError(
"This file appears to have multiple subarray "
"values; only files with one subarray are "
"supported."
)
else:
# cannot set this to be the baseline array because it uses the
# 256 convention, not our 2048 convention
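            # (in the AIPS 256 convention the packed value is roughly
            # 256 * ant1 + ant2 with 1-indexed antennas; the 2048 convention
            # packs larger antenna numbers plus an offset, so the raw values
            # must be decoded to antenna pairs and re-encoded below)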
bl_input_array = np.int64(vis_hdu.data.par("BASELINE"))
# get antenna arrays based on uvfits baseline array
self.ant_1_array, self.ant_2_array = self.baseline_to_antnums(
bl_input_array
)
# check for multi source files. NOW SUPPORTED, W00T!
if "SOURCE" in vis_hdu.data.parnames:
# Preserve the source info just in case the AIPS SU table is missing, and
# we need to revert things back.
self._set_multi_phase_center(preserve_phase_center_info=True)
source = vis_hdu.data.par("SOURCE")
self.phase_center_id_array = source.astype(int)
# get self.baseline_array using our convention
self.baseline_array = self.antnums_to_baseline(
self.ant_1_array, self.ant_2_array
)
self.Nbls = len(np.unique(self.baseline_array))
# initialize internal variables based on the antenna lists
self.Nants_data = int(np.union1d(self.ant_1_array, self.ant_2_array).size)
# read baseline vectors in units of seconds, return in meters
# FITS uvw direction convention is opposite ours and Miriad's.
# So conjugate the visibilities and flip the uvws:
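        # (the stored uvws are light travel times; e.g. a UU value of 1e-6 s
        # becomes roughly 300 m once multiplied by c below)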
self.uvw_array = (-1) * (
np.array(
np.stack(
(
vis_hdu.data.par("UU"),
vis_hdu.data.par("VV"),
vis_hdu.data.par("WW"),
)
)
)
* const.c.to("m/s").value
).T
if "INTTIM" in vis_hdu.data.parnames:
self.integration_time = np.asarray(
vis_hdu.data.par("INTTIM"), dtype=np.float64
)
else:
if self.Ntimes > 1:
# assume that all integration times in the file are the same
int_time = self._calc_single_integration_time()
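                # (the helper likely infers a single value from the spacing of
                # the unique times, converted from days to seconds)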
self.integration_time = (
np.ones_like(self.time_array, dtype=np.float64) * int_time
)
else:
warnings.warn(
"The integration time is not specified and only one time is "
"present so it cannot be calculated from the difference between "
"integration times. Setting to None which will cause the check to "
"error. Set `run_check` to False to read in the file without "
"checking. Then set the integration_time (to an array of length "
"Nblts) directly on the object to allow futher processing."
)
if proc is not None:
proc.join()

    def _get_data(
self,
vis_hdu,
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
read_metadata,
keep_all_metadata,
run_check,
check_extra,
run_check_acceptability,
strict_uvw_antpos_check,
fix_old_proj,
fix_use_ant_pos,
):
"""
        Read just the visibility and flag data of the uvfits file.

        Separated from full read so header and metadata can be read without data.
"""
# figure out what data to read in
blt_inds, freq_inds, pol_inds, history_update_string = self._select_preprocess(
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
)
if blt_inds is not None:
blt_frac = len(blt_inds) / float(self.Nblts)
else:
blt_frac = 1
if freq_inds is not None:
freq_frac = len(freq_inds) * float(self.Nspws) / float(self.Nfreqs)
else:
freq_frac = 1
if pol_inds is not None:
pol_frac = len(pol_inds) / float(self.Npols)
else:
pol_frac = 1
min_frac = np.min([blt_frac, freq_frac, pol_frac])
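        # (slice first along whichever axis discards the most data, so the
        # partial read pulls in as little as possible before the remaining cuts)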
if min_frac == 1:
# no select, read in all the data
if vis_hdu.header["NAXIS"] == 7:
raw_data_array = vis_hdu.data.data[:, 0, 0, :, :, :, :]
assert self.Nspws == raw_data_array.shape[1]
else:
# in many uvfits files the spw axis is left out,
# here we put it back in so the dimensionality stays the same
raw_data_array = vis_hdu.data.data[:, 0, 0, :, :, :]
raw_data_array = raw_data_array[:, np.newaxis, :, :]
else:
# do select operations on everything except data_array, flag_array
# and nsample_array
self._select_metadata(
blt_inds, freq_inds, pol_inds, history_update_string, keep_all_metadata
)
# just read in the right portions of the data and flag arrays
if blt_frac == min_frac:
if vis_hdu.header["NAXIS"] == 7:
raw_data_array = vis_hdu.data.data[blt_inds, :, :, :, :, :, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]
assert self.Nspws == raw_data_array.shape[1]
else:
# in many uvfits files the spw axis is left out,
# here we put it back in so the dimensionality stays the same
raw_data_array = vis_hdu.data.data[blt_inds, :, :, :, :, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :]
raw_data_array = raw_data_array[:, np.newaxis, :, :, :]
if freq_frac < 1:
raw_data_array = raw_data_array[:, :, freq_inds, :, :]
if pol_frac < 1:
raw_data_array = raw_data_array[:, :, :, pol_inds, :]
elif freq_frac == min_frac:
if vis_hdu.header["NAXIS"] == 7:
raw_data_array = vis_hdu.data.data[:, :, :, :, freq_inds, :, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]
assert self.Nspws == raw_data_array.shape[1]
else:
# in many uvfits files the spw axis is left out,
# here we put it back in so the dimensionality stays the same
raw_data_array = vis_hdu.data.data[:, :, :, freq_inds, :, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :]
raw_data_array = raw_data_array[:, np.newaxis, :, :, :]
if blt_frac < 1:
raw_data_array = raw_data_array[blt_inds, :, :, :, :]
if pol_frac < 1:
raw_data_array = raw_data_array[:, :, :, pol_inds, :]
else:
if vis_hdu.header["NAXIS"] == 7:
raw_data_array = vis_hdu.data.data[:, :, :, :, :, pol_inds, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :, :]
assert self.Nspws == raw_data_array.shape[1]
else:
# in many uvfits files the spw axis is left out,
# here we put it back in so the dimensionality stays the same
raw_data_array = vis_hdu.data.data[:, :, :, :, pol_inds, :]
raw_data_array = raw_data_array[:, 0, 0, :, :, :]
raw_data_array = raw_data_array[:, np.newaxis, :, :, :]
if blt_frac < 1:
raw_data_array = raw_data_array[blt_inds, :, :, :, :]
if freq_frac < 1:
raw_data_array = raw_data_array[:, :, freq_inds, :, :]
assert len(raw_data_array.shape) == 5
# Reshape the data array to be the right size if we are working w/ multiple
# spectral windows to be 'flex_spw' compliant
if self.Nspws > 1:
raw_data_array = np.reshape(
raw_data_array,
(self.Nblts, 1, self.Nfreqs, self.Npols, raw_data_array.shape[4]),
)
# FITS uvw direction convention is opposite ours and Miriad's.
# So conjugate the visibilities and flip the uvws:
self.data_array = (
raw_data_array[:, :, :, :, 0] - 1j * raw_data_array[:, :, :, :, 1]
)
self.flag_array = raw_data_array[:, :, :, :, 2] <= 0
self.nsample_array = np.abs(raw_data_array[:, :, :, :, 2])
if fix_old_proj:
self.fix_phase(use_ant_pos=fix_use_ant_pos)
# check if object has all required UVParameters set
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
strict_uvw_antpos_check=strict_uvw_antpos_check,
allow_flip_conj=True,
)
def read_uvfits(
self,
filename,
antenna_nums=None,
antenna_names=None,
ant_str=None,
bls=None,
frequencies=None,
freq_chans=None,
times=None,
time_range=None,
lsts=None,
lst_range=None,
polarizations=None,
blt_inds=None,
keep_all_metadata=True,
read_data=True,
background_lsts=True,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
fix_old_proj=False,
fix_use_ant_pos=True,
):
"""
Read in header, metadata and data from a uvfits file.
Supports reading only selected portions of the data.
Parameters
----------
filename : str
The uvfits file to read from.
antenna_nums : array_like of int, optional
The antenna numbers to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_names` is also provided. Ignored if read_data is False.
antenna_names : array_like of str, optional
The antenna names to include when reading data into the object
(antenna positions and names for the removed antennas will be retained
unless `keep_all_metadata` is False). This cannot be provided if
`antenna_nums` is also provided. Ignored if read_data is False.
bls : list of tuple, optional
A list of antenna number tuples (e.g. [(0, 1), (3, 2)]) or a list of
baseline 3-tuples (e.g. [(0, 1, 'xx'), (2, 3, 'yy')]) specifying baselines
to include when reading data into the object. For length-2 tuples,
the ordering of the numbers within the tuple does not matter. For
length-3 tuples, the polarization string is in the order of the two
antennas. If length-3 tuples are provided, `polarizations` must be
None. Ignored if read_data is False.
ant_str : str, optional
A string containing information about what antenna numbers
and polarizations to include when reading data into the object.
Can be 'auto', 'cross', 'all', or combinations of antenna numbers
and polarizations (e.g. '1', '1_2', '1x_2y'). See tutorial for more
examples of valid strings and the behavior of different forms for ant_str.
If '1x_2y,2y_3y' is passed, both polarizations 'xy' and 'yy' will
be kept for both baselines (1, 2) and (2, 3) to return a valid
pyuvdata object.
An ant_str cannot be passed in addition to any of `antenna_nums`,
`antenna_names`, `bls` args or the `polarizations` parameters,
if it is, a ValueError will be raised. Ignored if read_data is False.
frequencies : array_like of float, optional
The frequencies to include when reading data into the object, each
value passed here should exist in the freq_array. Ignored if
read_data is False.
freq_chans : array_like of int, optional
The frequency channel numbers to include when reading data into the
object. Ignored if read_data is False.
times : array_like of float, optional
The times to include when reading data into the object, each value
passed here should exist in the time_array.
time_range : array_like of float, optional
The time range in Julian Date to keep in the object, must be
length 2. Some of the times in the object should fall between the
first and last elements. Cannot be used with `times`.
lsts : array_like of float, optional
The local sidereal times (LSTs) to keep in the object, each value
passed here should exist in the lst_array. Cannot be used with
`times`, `time_range`, or `lst_range`.
lst_range : array_like of float, optional
The local sidereal time (LST) range in radians to keep in the
object, must be of length 2. Some of the LSTs in the object should
fall between the first and last elements. If the second value is
smaller than the first, the LSTs are treated as having phase-wrapped
around LST = 2*pi = 0, and the LSTs kept on the object will run from
the larger value, through 0, and end at the smaller value.
polarizations : array_like of int, optional
The polarization numbers to include when reading data into the
object, each value passed here should exist in the polarization_array.
Ignored if read_data is False.
blt_inds : array_like of int, optional
The baseline-time indices to include when reading data into the
object. This is not commonly used. Ignored if read_data is False.
keep_all_metadata : bool
Option to keep all the metadata associated with antennas, even those
that do not have data associated with them after the select option.
read_data : bool
Read in the visibility, nsample and flag data. If set to False, only
the metadata will be read in. Setting read_data to False results in
a metadata only object.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
run_check : bool
Option to check for the existence and proper shapes of parameters
after reading in the file (the default is True,
meaning the check will be run). Ignored if read_data is False.
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
Ignored if read_data is False.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done). Ignored if read_data is False.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
fix_old_proj : bool
Applies a fix to uvw-coordinates and phasing, assuming that the old `phase`
method was used prior to writing the data, which had errors of the order of
one part in 1e4 - 1e5. See the phasing memo for more details. Default is
False.
fix_use_ant_pos : bool
If setting `fix_old_proj` to True, use the antenna positions to derive the
correct uvw-coordinates rather than using the baseline vectors. Default is
True.
Raises
------
IOError
If filename doesn't exist.
ValueError
If incompatible select keywords are set (e.g. `ant_str` with other
antenna selectors, `times` and `time_range`) or select keywords
exclude all data or if keywords are set to the wrong type.
If the data have multiple spectral windows with different channel widths.
If the metadata are not internally consistent or missing.
"""
# update filename attribute
basename = os.path.basename(filename)
self.filename = [basename]
self._filename.form = (1,)
with fits.open(filename, memmap=True) as hdu_list:
vis_hdu = hdu_list[0] # assumes the visibilities are in the primary hdu
vis_hdr = vis_hdu.header.copy()
hdunames = uvutils._fits_indexhdus(hdu_list) # find the rest of the tables
# First get everything we can out of the header.
self._set_phased()
# check if we have an spw dimension
if vis_hdr["NAXIS"] == 7:
self.Nspws = vis_hdr.pop("NAXIS5")
self.spw_array = (
uvutils._fits_gethduaxis(vis_hdu, 5).astype(np.int64) - 1
)
# the axis number for phase center depends on if the spw exists
self.phase_center_ra_degrees = float(vis_hdr.pop("CRVAL6"))
self.phase_center_dec_degrees = float(vis_hdr.pop("CRVAL7"))
else:
self.Nspws = 1
self.spw_array = np.array([np.int64(0)])
# the axis number for phase center depends on if the spw exists
self.phase_center_ra_degrees = float(vis_hdr.pop("CRVAL5"))
self.phase_center_dec_degrees = float(vis_hdr.pop("CRVAL6"))
# get shapes
self.Npols = vis_hdr.pop("NAXIS3")
self.Nblts = vis_hdr.pop("GCOUNT")
if self.Nspws > 1:
# If this is multi-spw, use the 'flexible' spectral window setup
self._set_flex_spw()
uvfits_nchan = vis_hdr.pop("NAXIS4")
self.Nfreqs = uvfits_nchan * self.Nspws
self.flex_spw_id_array = np.transpose(
np.tile(np.arange(self.Nspws), (uvfits_nchan, 1))
).flatten()
fq_hdu = hdu_list[hdunames["AIPS FQ"]]
assert self.Nspws == fq_hdu.header["NO_IF"]
# TODO: This is fine for now, although I (karto) think that this
# is relative to the ref_freq, which can be specified as part of
# the AIPS SU table.
# Get rest freq value
ref_freq = uvutils._fits_gethduaxis(vis_hdu, 4)[0]
self.channel_width = np.transpose(
np.tile(abs(fq_hdu.data["CH WIDTH"]), (uvfits_nchan, 1))
).flatten()
self.freq_array = np.reshape(
np.transpose(
(
ref_freq
+ fq_hdu.data["IF FREQ"]
+ np.outer(np.arange(uvfits_nchan), fq_hdu.data["CH WIDTH"])
)
),
(1, -1),
)
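# The window frequencies (IF reference frequency plus per-channel offsets)
# are stacked and flattened here to shape (1, Nspws * uvfits_nchan), so all
# spectral windows share a single frequency axis.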
else:
self.Nfreqs = vis_hdr.pop("NAXIS4")
self.freq_array = uvutils._fits_gethduaxis(vis_hdu, 4)
# TODO: Spw axis to be collapsed in future release
self.freq_array.shape = (1,) + self.freq_array.shape
self.channel_width = vis_hdr.pop("CDELT4")
self.polarization_array = np.int32(uvutils._fits_gethduaxis(vis_hdu, 3))
# other info -- not required but frequently used
self.object_name = vis_hdr.pop("OBJECT", None)
self.telescope_name = vis_hdr.pop("TELESCOP", None)
self.instrument = vis_hdr.pop("INSTRUME", None)
latitude_degrees = vis_hdr.pop("LAT", None)
longitude_degrees = vis_hdr.pop("LON", None)
altitude = vis_hdr.pop("ALT", None)
self.x_orientation = vis_hdr.pop("XORIENT", None)
blt_order_str = vis_hdr.pop("BLTORDER", None)
if blt_order_str is not None:
self.blt_order = tuple(blt_order_str.split(", "))
if self.blt_order == ("bda",):
self._blt_order.form = (1,)
self.history = str(vis_hdr.get("HISTORY", ""))
if not uvutils._check_history_version(
self.history, self.pyuvdata_version_str
):
self.history += self.pyuvdata_version_str
self.vis_units = vis_hdr.pop("BUNIT", "uncalib")
# Added here as a fix since some previous versions of UVData allowed for
# all caps versions of UNCALIB.
if self.vis_units == "UNCALIB":
self.vis_units = "uncalib"
self.phase_center_epoch = vis_hdr.pop("EPOCH", None)
# PHSFRAME is not a standard UVFITS keyword, but was used by older
# versions of pyuvdata. To ensure backwards compatibility, we look
# for it first to determine the coordinate frame for the data
self.phase_center_frame = vis_hdr.pop("PHSFRAME", None)
# If we don't find the special keyword PHSFRAME, try for the more
# FITS-standard RADESYS
if self.phase_center_frame is None:
self.phase_center_frame = vis_hdr.pop("RADESYS", None)
# If we still don't find anything, try the two 'special' variant names
# for the coordinate frame that seem to have been documented
if self.phase_center_frame is None:
self.phase_center_frame = vis_hdr.pop("RADESYSA", None)
if self.phase_center_frame is None:
self.phase_center_frame = vis_hdr.pop("RADESYSa", None)
# If we _still_ can't find anything, take a guess based on the value
# listed in the EPOCH. The behavior listed here is based off of the
# AIPS task REGRD (http://www.aips.nrao.edu/cgi-bin/ZXHLP2.PL?REGRD)
if self.phase_center_frame is None:
if self.phase_center_epoch is None:
self.phase_center_frame = "icrs"
else:
frame = "fk4" if (self.phase_center_epoch == 1950.0) else "fk5"
self.phase_center_frame = frame
self.extra_keywords = uvutils._get_fits_extra_keywords(
vis_hdr, keywords_to_skip=["DATE-OBS"]
)
# Next read the antenna table
ant_hdu = hdu_list[hdunames["AIPS AN"]]
# stuff in the header
if self.telescope_name is None:
self.telescope_name = ant_hdu.header["ARRNAM"]
self.gst0 = ant_hdu.header["GSTIA0"]
self.rdate = ant_hdu.header["RDATE"]
self.earth_omega = ant_hdu.header["DEGPDY"]
self.dut1 = ant_hdu.header["UT1UTC"]
if "TIMESYS" in ant_hdu.header.keys():
self.timesys = ant_hdu.header["TIMESYS"]
else:
# CASA misspells this one
self.timesys = ant_hdu.header["TIMSYS"]
if "FRAME" in ant_hdu.header.keys():
xyz_telescope_frame = ant_hdu.header["FRAME"]
else:
warnings.warn(
"Required Antenna keyword 'FRAME' not set; "
"Assuming frame is 'ITRF'."
)
xyz_telescope_frame = "ITRF"
# get telescope location and antenna positions.
# VLA incorrectly sets ARRAYX/ARRAYY/ARRAYZ to 0, and puts array center
# in the antenna positions themselves
if (
np.isclose(ant_hdu.header["ARRAYX"], 0)
and np.isclose(ant_hdu.header["ARRAYY"], 0)
and np.isclose(ant_hdu.header["ARRAYZ"], 0)
):
x_telescope = np.mean(ant_hdu.data["STABXYZ"][:, 0])
y_telescope = np.mean(ant_hdu.data["STABXYZ"][:, 1])
z_telescope = np.mean(ant_hdu.data["STABXYZ"][:, 2])
self.antenna_positions = ant_hdu.data.field("STABXYZ") - np.array(
[x_telescope, y_telescope, z_telescope]
)
else:
x_telescope = ant_hdu.header["ARRAYX"]
y_telescope = ant_hdu.header["ARRAYY"]
z_telescope = ant_hdu.header["ARRAYZ"]
# AIPS memo #117 says that antenna_positions should be relative to
# the array center, but in a rotated ECEF frame so that the x-axis
# goes through the local meridian.
rot_ecef_positions = ant_hdu.data.field("STABXYZ")
latitude, longitude, altitude = uvutils.LatLonAlt_from_XYZ(
np.array([x_telescope, y_telescope, z_telescope]),
check_acceptability=run_check_acceptability,
)
self.antenna_positions = uvutils.ECEF_from_rotECEF(
rot_ecef_positions, longitude
)
if xyz_telescope_frame == "ITRF":
self.telescope_location = np.array(
[x_telescope, y_telescope, z_telescope]
)
else:
if (
latitude_degrees is not None
and longitude_degrees is not None
and altitude is not None
):
self.telescope_location_lat_lon_alt_degrees = (
latitude_degrees,
longitude_degrees,
altitude,
)
# stuff in columns
ant_names = ant_hdu.data.field("ANNAME").tolist()
self.antenna_names = []
for ant_ind, name in enumerate(ant_names):
# Sometimes CASA writes antnames as bytes not strings.
# If the ant name is shorter than 8 characters, the trailing
# characters may be non-ascii.
# This is technically a FITS violation as FITS requires ascii.
# So we just ignore any non-ascii bytes in the decode.
if isinstance(name, bytes):
ant_name_str = str(name.decode("utf-8", "ignore"))
else:
ant_name_str = name
# remove non-printing ascii characters and exclamation points
ant_name_str = (
ant_name_str.replace("\x00", "")
.replace("\x07", "")
.replace("!", "")
)
self.antenna_names.append(ant_name_str)
# subtract one to get to 0-indexed values rather than 1-indexed values
self.antenna_numbers = ant_hdu.data.field("NOSTA") - 1
self.Nants_telescope = len(self.antenna_numbers)
if "DIAMETER" in ant_hdu.columns.names:
self.antenna_diameters = ant_hdu.data.field("DIAMETER")
try:
self.set_telescope_params()
except ValueError as ve:
warnings.warn(str(ve))
# Now read in the random parameter info
self._get_parameter_data(
vis_hdu, run_check_acceptability, background_lsts=background_lsts,
)
# If we find the source attribute in the FITS random parameter list,
# the multi_phase_center attribute will be set to True, and we should also
# expect that there must be an AIPS SU table.
if self.multi_phase_center and "AIPS SU" not in hdunames.keys():
warnings.warn(
"UVFITS file is missing AIPS SU table, which is required when "
"SOURCE is one of the `random paramters` in the main binary "
"table. Bypassing for now, but note that this file _may_ not "
"work correctly in UVFITS-based programs (e.g., AIPS, CASA)."
)
name = list(self.phase_center_catalog.keys())[0]
self.phase_center_ra = self.phase_center_catalog[name]["cat_lon"]
self.phase_center_dec = self.phase_center_catalog[name]["cat_lat"]
self.phase_center_frame = self.phase_center_catalog[name]["cat_frame"]
self.phase_center_epoch = self.phase_center_catalog[name]["cat_epoch"]
self.multi_phase_center = False
self._phase_center_id_array.required = False
self._Nphase.required = False
self._phase_center_catalog.required = False
self.object_name = name
self.Nphase = None
self.phase_center_catalog = None
self.phase_center_id_array = None
elif self.multi_phase_center:
su_hdu = hdu_list[hdunames["AIPS SU"]]
# We should have as many entries in the AIPS SU header as we have
# unique entries in the SOURCES random parameter (checked in the call
# to get_parameter_data above)
if len(su_hdu.data) != len(np.unique(self.phase_center_id_array)):
raise RuntimeError(
"The UVFITS file has a malformed AIPS SU table - number of "
"sources do not match the number of unique source IDs in the "
"primary data header."
) # pragma: no cover
# Reset the catalog, since it has some dummy information stored within
# it (that was pulled off the primary table)
self._remove_phase_center(list(self.phase_center_catalog.keys())[0])
# Set up these arrays so we can assign values to them
self.phase_center_app_ra = np.zeros(self.Nblts)
self.phase_center_app_dec = np.zeros(self.Nblts)
self.phase_center_app_pa = np.zeros(self.Nblts)
# Alright, we are off to the races!
for idx in range(len(su_hdu.data)):
# Grab the indv source entry
sou_info = su_hdu.data[idx]
sou_id = sou_info["ID. NO."]
sou_name = sou_info["SOURCE"]
sou_ra = sou_info["RAEPO"] * (np.pi / 180.0)
sou_dec = sou_info["DECEPO"] * (np.pi / 180.0)
sou_epoch = sou_info["EPOCH"]
sou_frame = "fk5"
self._add_phase_center(
sou_name,
cat_id=sou_id,
cat_type="sidereal",
cat_lon=sou_ra,
cat_lat=sou_dec,
cat_frame=sou_frame,
cat_epoch=sou_epoch,
info_source="uvfits file",
)
# Calculate the apparent coordinate values
self._set_app_coords_helper()
if not read_data:
# don't read in the data. This means the object is a metadata
# only object but that may not matter for many purposes.
return
# Now read in the data
self._get_data(
vis_hdu,
antenna_nums,
antenna_names,
ant_str,
bls,
frequencies,
freq_chans,
times,
time_range,
lsts,
lst_range,
polarizations,
blt_inds,
False,
keep_all_metadata,
run_check,
check_extra,
run_check_acceptability,
strict_uvw_antpos_check,
fix_old_proj,
fix_use_ant_pos,
)
def write_uvfits(
self,
filename,
spoof_nonessential=False,
write_lst=True,
force_phase=False,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Write the data to a uvfits file.
Parameters
----------
filename : str
The uvfits file to write to.
spoof_nonessential : bool
Option to spoof the values of optional UVParameters that are not set
but are required for uvfits files.
write_lst : bool
Option to write the LSTs to the metadata (random group parameters).
force_phase : bool
Option to automatically phase drift scan data to zenith of the first
timestamp.
run_check : bool
Option to check for the existence and proper shapes of parameters
before writing the file.
check_extra : bool
Option to check optional parameters as well as required ones.
run_check_acceptability : bool
Option to check acceptable range of the values of parameters before
writing the file.
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
The `phase_type` of the object is "drift" and the `force_phase`
keyword is not set.
If the frequencies are not evenly spaced or are separated by more
than their channel width.
The polarization values are not evenly spaced.
Any of ['antenna_positions', 'gst0', 'rdate', 'earth_omega', 'dut1',
'timesys'] are not set on the object and `spoof_nonessential` is False.
If the `timesys` parameter is not set to "UTC".
TypeError
If any entry in extra_keywords is not a single string or number.
"""
if run_check:
self.check(
check_extra=check_extra,
run_check_acceptability=run_check_acceptability,
check_freq_spacing=True,
strict_uvw_antpos_check=strict_uvw_antpos_check,
)
if self.phase_type == "phased":
pass
elif self.phase_type == "drift":
if force_phase:
print(
"The data are in drift mode and do not have a "
"defined phase center. Phasing to zenith of the first "
"timestamp."
)
phase_time = Time(self.time_array[0], format="jd")
self.phase_to_time(phase_time)
else:
raise ValueError(
"The data are in drift mode. "
"Set force_phase to true to phase the data "
"to zenith of the first timestamp before "
"writing a uvfits file."
)
if self.flex_spw:
# If we have a 'flexible' spectral window, we will need to evaluate the
# frequency axis slightly differently.
if self.future_array_shapes:
freq_array_use = self.freq_array
else:
freq_array_use = self.freq_array[0, :]
nchan_list = []
start_freq_array = []
delta_freq_array = []
for idx in self.spw_array:
chan_mask = self.flex_spw_id_array == idx
nchan_list += [np.sum(chan_mask)]
start_freq_array += [freq_array_use[chan_mask][0]]
# Need the array direction here since channel_width is always supposed
# to be > 0, but channels can be in descending freq order
freq_dir = np.sign(np.median(np.diff(freq_array_use[chan_mask])))
delta_freq_array += [
np.median(self.channel_width[chan_mask]) * freq_dir
]
start_freq_array = np.reshape(np.array(start_freq_array), (1, -1)).astype(
np.float64
)
delta_freq_array = np.reshape(np.array(delta_freq_array), (1, -1)).astype(
np.float64
)
# We've constructed a couple of lists with relevant values, now time to
# check them to make sure that the data will write correctly
# Make sure that all the windows are of the same size
if len(np.unique(nchan_list)) != 1:
raise IndexError(
"UVFITS format cannot handle spectral windows of different sizes!"
)
# Make sure freq values are greater than zero. Note that I think _technically
# one could write negative frequencies into the dataset, but I am pretty
# sure that reduction packages may balk hard.
if np.any(start_freq_array <= 0):
raise ValueError("Frequency values must be > 0 for UVFITS!")
# Make sure the delta values are non-zero
if np.any(delta_freq_array == 0):
raise ValueError("Something is wrong, frequency values not unique!")
# If we passed all the above checks, then it's time to fill some extra
# array values. Note that 'ref_freq' is something of a placeholder for
# other exciting things...
ref_freq = start_freq_array[0, 0]
else:
if self.future_array_shapes:
ref_freq = self.freq_array[0]
# we've already run the check_freq_spacing, so channel widths are the
# same to our tolerances
delta_freq_array = np.array([[np.median(self.channel_width)]]).astype(
np.float64
)
else:
ref_freq = self.freq_array[0, 0]
delta_freq_array = np.array([[self.channel_width]]).astype(np.float64)
# Creating 3D bar charts
# Libraries used
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
#%%
### Specifying values
## Scalar case
# Specify the anchor point
x = 0.0
y = 0.0
z = 0.0
# Specify the bar's half-width value
a = 0.5
# Specify the deltas
dx = a * 2.0
dy = a * 2.0
dz = 1.0
#%%
## 1-D array case
# Create the values
vals = np.arange(3)
# Create the grid points
X, Y = np.meshgrid(vals, vals)
# Set the anchor points
x = X.flatten()
y = Y.flatten()
z = np.zeros_like(x)
# Specify the bar's half-width value
a = 0.5
# Specify the deltas
dx = np.repeat(a=a * 2.0, repeats=len(x))
dy = np.repeat(a=a * 2.0, repeats=len(y))
dz = np.arange(len(z))
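# Note: with a = 0.5 the bar widths dx = dy = 1.0 equal the spacing of the
# np.arange(3) grid, so the bars tile the xy-plane without gaps.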
#%%
### Relationship between the arguments and the bars
# Create the 3D bar chart
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x, y=y, z=z, dx=dx, dy=dy, dz=dz, color='white', alpha=0.25) # 3D bar chart
ax.scatter(xs=x, ys=y, zs=z, s=100, color='purple', label='(x, y, z)') # anchor point
ax.scatter(xs=x+dx, ys=y, zs=z, s=100, color='red', label='(x+dx, y, z)') # point shifted along the x-axis
ax.scatter(xs=x, ys=y+dy, zs=z, s=100, color='pink', label='(x, y+dy, z)') # point shifted along the y-axis
ax.scatter(xs=x, ys=y, zs=z+dz, s=100, color='orange', label='(x, y, z+dz)') # point shifted along the z-axis
ax.scatter(xs=x+dx, ys=y+dy, zs=z+dz, s=100, color='springgreen', label='(x+dx, y+dy, z+dz)') # point shifted along all axes
ax.quiver(x, y, z, dx, 0.0, 0.0, color='purple', linestyle='--', arrow_length_ratio=0.1) # x-axis delta
ax.quiver(x, y, z, 0.0, dy, 0.0, color='purple', linestyle='--', arrow_length_ratio=0.1) # y-axis delta
ax.quiver(x, y, z, 0.0, 0.0, dz, color='purple', linestyle='--', arrow_length_ratio=0.1) # z-axis delta
ax.quiver(x, y, z, dx, dy, dz, color='purple', arrow_length_ratio=0.1) # delta along all axes
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title('bar3d', fontsize='20') # title
fig.legend() # legend
#ax.view_init(elev=90, azim=270) # viewing angle
plt.show() # draw
# Create the 3D bar chart
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x, y=y, z=z, dx=dx, dy=dy, dz=dz, color='white', alpha=0.25) # 3D bar chart
ax.scatter(xs=x, ys=y, zs=z, s=100, color='purple', label='(x, y, z)') # anchor point
ax.scatter(xs=x+dx, ys=y, zs=z, s=100, color='red', label='(x+dx, y, z)') # point shifted along the x-axis
ax.scatter(xs=x+dx, ys=y+dy, zs=z, s=100, color='aqua', label='(x+dx, y+dy, z)') # point shifted along the x- and y-axes
ax.scatter(xs=x+dx, ys=y+dy, zs=z+dz, s=100, color='springgreen', label='(x+dx, y+dy, z+dz)') # point shifted along all axes
ax.quiver(x, y, z, dx, 0.0, 0.0, color='purple', linestyle=':', arrow_length_ratio=0.1) # x-axis delta
ax.quiver(x+dx, y, z, 0.0, dy, 0.0, color='purple', linestyle=':', arrow_length_ratio=0.1) # y-axis delta
ax.quiver(x+dx, y+dy, z, 0.0, 0.0, dz, color='purple', linestyle=':', arrow_length_ratio=0.1) # z-axis delta
ax.quiver(x, y, z, dx, dy, dz, color='purple', arrow_length_ratio=0.1) # delta along all axes
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title('bar3d', fontsize='20') # title
fig.legend() # legend
#ax.view_init(elev=90, azim=270) # viewing angle
plt.show() # draw
#%%
### Adjusting the anchor point
# Default settings
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x, y=y, z=z, dx=dx, dy=dy, dz=dz, color='white', alpha=0.5) # 3D bar chart
ax.scatter(xs=x, ys=y, zs=z, s=100, color='purple', label='(x, y, z)') # anchor point
ax.quiver(x, y, z, dx, 0.0, 0.0, color='purple', linestyle='--', arrow_length_ratio=0.1) # x-axis delta
ax.quiver(x, y, z, 0.0, dy, 0.0, color='purple', linestyle='--', arrow_length_ratio=0.1) # y-axis delta
ax.quiver(x, y, z, 0.0, 0.0, dz, color='purple', linestyle='--', arrow_length_ratio=0.1) # z-axis delta
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title('(x, y, z)', fontsize='20') # title
fig.legend() # legend
#ax.view_init(elev=90, azim=270) # viewing angle
plt.show() # draw
# Shift the anchor point
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x-a, y=y-a, z=z, dx=dx, dy=dy, dz=dz, color='white', alpha=0.5) # 3D bar chart
ax.scatter(xs=x, ys=y, zs=z, color='purple', s=100, label='(x, y, z)') # original anchor point
ax.scatter(xs=x-a, ys=y-a, zs=z, color='green', s=100, label='(x-' + str(a) + ', y-' + str(a) + ', z)') # adjusted anchor point
ax.quiver(x-a, y-a, z, dx, 0.0, 0.0, color='green', linestyle='--', arrow_length_ratio=0.1) # x-axis delta
ax.quiver(x-a, y-a, z, 0.0, dy, 0.0, color='green', linestyle='--', arrow_length_ratio=0.1) # y-axis delta
ax.quiver(x-a, y-a, z, 0.0, 0.0, dz, color='green', linestyle='--', arrow_length_ratio=0.1) # z-axis delta
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title('(x-a, y-a, z)', fontsize='20') # title
fig.legend() # legend
#ax.view_init(elev=90, azim=270) # viewing angle
plt.show() # draw
#%%
### The shade argument
# With shading (default)
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x, y=y, z=z, dx=dx, dy=dy, dz=dz, shade=True) # 3D bar chart
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title('shade=True', fontsize='20') # title
#ax.view_init(elev=90, azim=270) # viewing angle
plt.show() # draw
# Without shading
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x-a, y=y-a, z=z, dx=dx, dy=dy, dz=dz, shade=False) # 3D bar chart
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title('shade=False', fontsize='20') # title
#ax.view_init(elev=90, azim=270) # viewing angle
plt.show() # draw
#%%
### The color argument
# Specify the colormap
cm = plt.get_cmap('jet')
# Convert to RGBA information
print(cm(1.0))
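# For 'jet' this should print (0.5, 0.0, 0.0, 1.0): dark red, fully opaque.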
# Specify the colormap
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x-a, y=y-a, z=z, dx=dx, dy=dy, dz=dz,
color=cm(dz / np.max(dz)), alpha=0.5) # 3D bar chart
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title("cmap='jet'", fontsize='20') # title
plt.show() # draw
#%%
### The edgecolor argument
# Specify the colormap
cm = plt.get_cmap('rainbow')
# Default settings
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x-a, y=y-a, z=z, dx=dx, dy=dy, dz=dz,
color=cm(dz / np.max(dz)), edgecolor=cm(dz / np.max(dz)), alpha=0.5) # 3D bar chart
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title('edgecolor=cm(...)', fontsize='20') # title
plt.show() # draw
# Match the face and edge colors
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=x-a, y=y-a, z=z, dx=dx, dy=dy, dz=dz,
color=cm(dz / np.max(dz)), edgecolor=cm(np.repeat(dz / np.max(dz), 6)), alpha=0.5) # 3D bar chart
ax.set_xlabel('x') # x-axis label
ax.set_ylabel('y') # y-axis label
ax.set_zlabel('z') # z-axis label
ax.set_title('edgecolor=cm(np.repeat(..., 6))', fontsize='20') # title
plt.show() # draw
#%%
# Specify the colormap
cm = plt.get_cmap('rainbow')
# Check the colormap
plt.figure(figsize=(9, 8)) # figure settings
plt.scatter(np.arange(6), np.arange(6), color=cm(np.arange(6) / 5), s=250) # scatter plot
plt.grid() # grid lines
plt.title("cmap='rainbow'", fontsize=20) # title
plt.show() # draw
# Check the order in which the edges are colored
fig = plt.figure(figsize=(9, 8)) # figure settings
ax = fig.add_subplot(projection='3d') # 3D axes settings
ax.bar3d(x=-0.5, y=-0.5, z=0.0, dx=1.0, dy=1.0, dz=1.0,
color='white', edgecolors=cm(np.arange(6) / 5)) # 3D bar chart
import numpy as np
from pyBKT.generate import synthetic_data
from pyBKT.generate import random_model, random_model_uni
from pyBKT.fit import EM_fit
from copy import deepcopy
from pyBKT.util import print_dot
#parameters
num_subparts = 4
num_resources = 2
num_fit_initializations = 25
observation_sequence_lengths = np.full(50, 100, dtype=np.int)
#generate synthetic model and data.
#model is really easy.
truemodel = {}
truemodel["As"] = | np.zeros((num_resources, 2, 2), dtype=np.float_) | numpy.zeros |
import gym
from gym import spaces, logger
import math
import numpy as np
from gym.utils import seeding
from copy import deepcopy
import warnings
import os
class TaskT(gym.Env):
metadata = {'name':'TaskT', 'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 50}
spec = {'id':'TaskT'}
def __init__(self, sections=1, seq='RGB', final_reward=False, reward_obs=True, R=4, saving=False, log_dir="./TaskT_log/"):
"""
Sequential target reaching task.
:param sections: how many targets to reach to finish the task
:param seq: any combination of 'R', 'G', 'B' to indicate the required sequence of target-reaching.
:param final_reward: if True, only final target provides reward, otherwise all targets provide reward.
:param reward_obs: whether reward is one element of observation
:param R: difficulty (distance between targets)
:param saving: whether to save steps/rewards into txt file
:param log_dir: directory to save steps/rewards
"""
self.sections = sections
self.saving = saving
self.log_dir = log_dir
self.final_reward = final_reward
self.reward_obs = reward_obs
self.sequence = seq
self.R = R
self.reward = 0.0
self.reward_signal = 0.0
self.dim_position = 2
self.dim_action = 2
self.speed = 0.8
self.radius = 0.5
self.max_steps = 128
self.steps = 0
self.init_position = np.array([7.5, 7.5], dtype=np.float32)
self.init_position[0] += np.float32(15 * (np.random.rand() - 0.5))
self.init_position[1] += np.float32(15 * (np.random.rand() - 0.5))
self.old_position = self.init_position
self.new_position = self.init_position
self.orientation = 2 * np.pi * np.random.rand()
self.init_state = 0
self.size = 1
self.action_space = spaces.Box(low=-1., high=1., shape=(2,))
if reward_obs:
self.observation_space = spaces.Box(low=-1., high=5., shape=(12,))
else:
self.observation_space = spaces.Box(low=-1., high=1., shape=(11,))
self.reward_range = (-np.Inf, np.Inf)
self._seed()
if self.saving:
if os.path.exists(log_dir):
warnings.warn('{} exists (possibly so do data).'.format(log_dir))
else:
os.makedirs(log_dir)
path = self.log_dir + 'TaskT' + '.txt'
self.file_pointer = open(path, 'w+')
self.red_position = np.float32(R * (np.random.rand(self.dim_position) - 0.5) + self.init_position)
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is not the main demo script. It is simple a handy script generate more
# interesting data to try with the main demo script, `clustering.py`.
#
# This script is about setting up this more interesting data and then passing
# the data to `clustering.cluster_points(..)`, a key function `clustering.py`.
import numpy as np
from utilities import visualize_scatterplot
from clustering import cluster_points
# Set up three different clusters of data points
covariance = [[3, 0], [0, 3]]
n_points = 3
x0, y0 = np.random.multivariate_normal([0, 0], covariance, n_points).T
x1, y1 = np.random.multivariate_normal([10, 5], covariance, n_points).T
x2, y2 = np.random.multivariate_normal([5, 15], covariance, n_points).T
# Combine data points together into a list of tuples
# Note: data points now look like [(x0, y0), (x1, y1), ..]
xs = np.hstack([x0, x1, x2])
ys = np.hstack([y0, y1, y2])
xys = np.vstack([xs, ys]).T
import numpy as np
np.random.seed(1234)
import matplotlib.pyplot as plt
from scipy.stats import gamma, beta, betaprime
from pyhawkes.models import DiscreteTimeNetworkHawkesModelSpikeAndSlab
from pybasicbayes.util.text import progprint_xrange
if __name__ == "__main__":
"""
Create a discrete time Hawkes model and generate from it.
:return:
"""
K = 1
T = 50
dt = 1.0
dt_max = 3.0
# network_hypers = {'C': 1, 'p': 0.5, 'kappa': 3.0, 'alpha': 3.0, 'beta': 1.0/20.0}
network_hypers = {'c': np.zeros(K, dtype=np.int), 'p': 0.5, 'kappa': 10.0, 'v': 10*3.0}
bkgd_hypers = {"alpha": 1., "beta": 10.}
model = DiscreteTimeNetworkHawkesModelSpikeAndSlab(K=K, dt=dt, dt_max=dt_max,
weight_hypers={"parallel_resampling": False},
network_hypers=network_hypers)
model.generate(T=T)
# Gibbs sample and then generate new data
N_samples = 10000
samples = []
lps = []
for itr in progprint_xrange(N_samples, perline=50):
# Resample the model
model.resample_model()
samples.append(model.copy_sample())
lps.append(model.log_likelihood())
# Geweke step
model.data_list.pop()
model.generate(T=T)
# Compute sample statistics for second half of samples
A_samples = np.array([s.weight_model.A for s in samples])
W_samples = np.array([s.weight_model.W for s in samples])
g_samples = np.array([s.impulse_model.g for s in samples])
lambda0_samples = np.array([s.bias_model.lambda0 for s in samples])
lps = np.array(lps)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Training script for split CUB experiment with zero shot transfer.
"""
from __future__ import print_function
import argparse
import os
import sys
import math
import time
import datetime
import numpy as np
import tensorflow as tf
from copy import deepcopy
from six.moves import cPickle as pickle
from utils.data_utils import image_scaling, random_crop_and_pad_image, random_horizontal_flip, construct_split_cub
from utils.utils import get_sample_weights, sample_from_dataset, concatenate_datasets, update_episodic_memory_with_less_data, samples_for_each_class, sample_from_dataset_icarl, load_task_specific_data
from utils.vis_utils import plot_acc_multiple_runs, plot_histogram, snapshot_experiment_meta_data, snapshot_experiment_eval, snapshot_task_labels
from model import Model
###############################################################
################ Some definitions #############################
### These will be edited by the command line options ##########
###############################################################
## Training Options
NUM_RUNS = 5 # Number of experiments to average over
TRAIN_ITERS = 2000 # Number of training iterations per task
BATCH_SIZE = 16
LEARNING_RATE = 0.1
RANDOM_SEED = 1234
VALID_OPTIMS = ['SGD', 'MOMENTUM', 'ADAM']
OPTIM = 'SGD'
OPT_MOMENTUM = 0.9
OPT_POWER = 0.9
VALID_ARCHS = ['CNN', 'VGG', 'RESNET-B']
ARCH = 'RESNET-B'
PRETRAIN = True
## Model options
MODELS = ['VAN', 'PI', 'EWC', 'MAS', 'RWALK', 'A-GEM'] #List of valid models
IMP_METHOD = 'EWC'
SYNAP_STGTH = 75000
FISHER_EMA_DECAY = 0.9 # Exponential moving average decay factor for Fisher computation (online Fisher)
FISHER_UPDATE_AFTER = 50 # Number of training iterations for which the F_{\theta}^t is computed (see Eq. 10 in RWalk paper)
SAMPLES_PER_CLASS = 5 # Number of samples per class
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
TOTAL_CLASSES = 200 # Total number of classes in the dataset
EPS_MEM_BATCH_SIZE = 128
DEBUG_EPISODIC_MEMORY = False
KEEP_EPISODIC_MEMORY_FULL = False
K_FOR_CROSS_VAL = 3
## Logging, saving and testing options
LOG_DIR = './split_cub_results'
SNAPSHOT_DIR = './cub_snapshots'
SAVE_MODEL_PARAMS = False
## Evaluation options
## Task split
NUM_TASKS = 20
MULTI_TASK = False
## Dataset specific options
ATTR_DIMS = 312
DATA_DIR='CUB_data/CUB_200_2011/images'
#CUB_TRAIN_LIST = 'dataset_lists/tmp_list.txt'
#CUB_TEST_LIST = 'dataset_lists/tmp_list.txt'
CUB_TRAIN_LIST = 'dataset_lists/CUB_train_list.txt'
CUB_TEST_LIST = 'dataset_lists/CUB_test_list.txt'
CUB_ATTR_LIST = 'dataset_lists/CUB_attr_in_order.pickle'
RESNET18_IMAGENET_CHECKPOINT = './resnet-18-pretrained-imagenet/model.ckpt'
# Define function to load/ store training weights. We will use ImageNet initialization later on
def save(saver, sess, logdir, step):
'''Save weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
logdir: path to the snapshots directory.
step: current training step.
'''
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
'''Load trained weights.
Args:
saver: TensorFlow Saver object.
sess: TensorFlow session.
ckpt_path: path to checkpoint file with parameters.
'''
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_arguments():
"""Parse all the arguments provided from the CLI.
Returns:
A list of parsed arguments.
"""
parser = argparse.ArgumentParser(description="Script for split CUB hybrid experiment.")
parser.add_argument("--cross-validate-mode", action="store_true",
help="If option is chosen then snapshoting after each batch is disabled")
parser.add_argument("--online-cross-val", action="store_true",
help="If option is chosen then enable the online cross validation of the learning rate")
parser.add_argument("--train-single-epoch", action="store_true",
help="If option is chosen then train for single epoch")
parser.add_argument("--set-hybrid", action="store_true",
help="If option is chosen then train using hybrid model")
parser.add_argument("--eval-single-head", action="store_true",
help="If option is chosen then evaluate on a single head setting.")
parser.add_argument("--arch", type=str, default=ARCH,
help="Network Architecture for the experiment.\
\n \nSupported values: %s"%(VALID_ARCHS))
parser.add_argument("--num-runs", type=int, default=NUM_RUNS,
help="Total runs/ experiments over which accuracy is averaged.")
parser.add_argument("--train-iters", type=int, default=TRAIN_ITERS,
help="Number of training iterations for each task.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Mini-batch size for each task.")
parser.add_argument("--random-seed", type=int, default=RANDOM_SEED,
help="Random Seed.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Starting Learning rate for each task.")
parser.add_argument("--optim", type=str, default=OPTIM,
help="Optimizer for the experiment. \
\n \nSupported values: %s"%(VALID_OPTIMS))
parser.add_argument("--imp-method", type=str, default=IMP_METHOD,
help="Model to be used for LLL. \
\n \nSupported values: %s"%(MODELS))
parser.add_argument("--synap-stgth", type=float, default=SYNAP_STGTH,
help="Synaptic strength for the regularization.")
parser.add_argument("--fisher-ema-decay", type=float, default=FISHER_EMA_DECAY,
help="Exponential moving average decay for Fisher calculation at each step.")
parser.add_argument("--fisher-update-after", type=int, default=FISHER_UPDATE_AFTER,
help="Number of training iterations after which the Fisher will be updated.")
parser.add_argument("--do-sampling", action="store_true",
help="Whether to do sampling")
parser.add_argument("--mem-size", type=int, default=SAMPLES_PER_CLASS,
help="Number of samples per class from previous tasks.")
parser.add_argument("--is-herding", action="store_true",
help="Herding based sampling")
parser.add_argument("--data-dir", type=str, default=DATA_DIR,
help="Directory from where the CUB data will be read.\
NOTE: Provide path till <CUB_DIR>/images")
parser.add_argument("--init-checkpoint", type=str, default=RESNET18_IMAGENET_CHECKPOINT,
help="TF checkpoint file containing initialization for ImageNet.\
NOTE: NPZ file for VGG and TF Checkpoint for ResNet")
parser.add_argument("--log-dir", type=str, default=LOG_DIR,
help="Directory where the plots and model accuracies will be stored.")
return parser.parse_args()
def train_task_sequence(model, sess, saver, datasets, class_attr, classes_per_task, cross_validate_mode, train_single_epoch, eval_single_head, do_sampling, is_herding,
mem_per_class, train_iters, batch_size, num_runs, init_checkpoint, online_cross_val, random_seed):
"""
Train and evaluate LLL system such that we only see a example once
Args:
Returns:
dict A dictionary containing mean and stds for the experiment
"""
# List to store accuracy for each run
runs = []
task_labels_dataset = []
break_training = 0
# Loop over number of runs to average over
for runid in range(num_runs):
print('\t\tRun %d:'%(runid))
# Initialize the random seeds
np.random.seed(random_seed+runid)
# Get the task labels from the total number of tasks and full label space
task_labels = []
total_classes = classes_per_task * model.num_tasks
if online_cross_val:
label_array = np.arange(total_classes)
else:
class_label_offset = K_FOR_CROSS_VAL * classes_per_task
label_array = np.arange(class_label_offset, total_classes+class_label_offset)
np.random.shuffle(label_array)
for tt in range(model.num_tasks):
tt_offset = tt*classes_per_task
task_labels.append(list(label_array[tt_offset:tt_offset+classes_per_task]))
print('Task: {}, Labels:{}'.format(tt, task_labels[tt]))
# Store the task labels
task_labels_dataset.append(task_labels)
# Set episodic memory size
episodic_mem_size = mem_per_class * total_classes
# Initialize all the variables in the model
sess.run(tf.global_variables_initializer())
if PRETRAIN:
# Load the variables from a checkpoint
if model.network_arch == 'RESNET-B':
# Define loader (weights which will be loaded from a checkpoint)
restore_vars = [v for v in model.trainable_vars if 'fc' not in v.name and 'attr_embed' not in v.name]
loader = tf.train.Saver(restore_vars)
load(loader, sess, init_checkpoint)
elif model.network_arch == 'VGG':
# Load the pretrained weights from the npz file
weights = np.load(init_checkpoint)
keys = sorted(weights.keys())
for i, key in enumerate(keys[:-2]): # Load everything except the last layer
sess.run(model.trainable_vars[i].assign(weights[key]))
# Run the init ops
model.init_updates(sess)
# List to store accuracies for a run
evals = []
# List to store the classes that we have so far - used at test time
test_labels = []
if model.imp_method == 'S-GEM':
# List to store the episodic memories of the previous tasks
task_based_memory = []
if model.imp_method == 'A-GEM':
# Reserve a space for episodic memory
episodic_images = np.zeros([episodic_mem_size, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])
episodic_labels = np.zeros([episodic_mem_size, TOTAL_CLASSES])
episodic_filled_counter = 0
a_gem_logit_mask = np.zeros([model.num_tasks, TOTAL_CLASSES])
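# One mask row per task: row t switches on only the class logits of task t,
# so per-task losses and reference gradients can be computed below.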
# Labels for all the tasks that we have seen in the past
prev_task_labels = []
prev_class_attrs = np.zeros_like(class_attr)
if do_sampling:
# List to store important samples from the previous tasks
last_task_x = None
last_task_y_ = None
# Mask for the softmax
logit_mask = np.zeros(TOTAL_CLASSES)
# Training loop for all the tasks
for task in range(len(task_labels)):
print('\t\tTask %d:'%(task))
# If not the first task then restore weights from previous task
if(task > 0):
model.restore(sess)
# If sampling flag is set append the previous datasets
if do_sampling:
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
if task > 0:
task_train_images, task_train_labels = concatenate_datasets(task_tr_images, task_tr_labels, last_task_x, last_task_y_)
else:
task_train_images = task_tr_images
task_train_labels = task_tr_labels
else:
# Extract training images and labels for the current task
task_train_images, task_train_labels = load_task_specific_data(datasets[0]['train'], task_labels[task])
# If multi_task is set then train using all the datasets of all the tasks
if MULTI_TASK:
if task == 0:
for t_ in range(1, len(task_labels)):
task_tr_images, task_tr_labels = load_task_specific_data(datasets[0]['train'], task_labels[t_])
task_train_images = np.concatenate((task_train_images, task_tr_images), axis=0)
task_train_labels = np.concatenate((task_train_labels, task_tr_labels), axis=0)
else:
# Skip training for this task
continue
print('Received {} images, {} labels at task {}'.format(task_train_images.shape[0], task_train_labels.shape[0], task))
# Test for the tasks that we've seen so far
test_labels.extend(task_labels[task])
# Declare variables to store sample importance if sampling flag is set
if do_sampling:
# Get the sample weighting
task_sample_weights = get_sample_weights(task_train_labels, test_labels)
else:
# Assign equal weights to all the examples
task_sample_weights = np.ones([task_train_labels.shape[0]], dtype=np.float32)
num_train_examples = task_train_images.shape[0]
# Train a task observing sequence of data
logit_mask[:] = 0
if train_single_epoch:
# Ceiling operation
num_iters = (num_train_examples + batch_size - 1) // batch_size
if cross_validate_mode:
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
else:
num_iters = train_iters
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
# Randomly shuffle the training examples
perm = np.arange(num_train_examples)
np.random.shuffle(perm)
train_x = task_train_images[perm]
train_y = task_train_labels[perm]
task_sample_weights = task_sample_weights[perm]
# Array to store accuracies when training for task T
ftask = []
if MULTI_TASK:
logit_mask[:] = 1.0
masked_class_attrs = class_attr
else:
# Attribute mask
masked_class_attrs = np.zeros_like(class_attr)
if do_sampling:
masked_class_attrs[test_labels] = class_attr[test_labels]
else:
masked_class_attrs[task_labels[task]] = class_attr[task_labels[task]]
# Training loop for task T
for iters in range(num_iters):
if train_single_epoch and not cross_validate_mode and not MULTI_TASK:
#if (iters <= 50 and iters % 5 == 0) or (iters > 50 and iters % 50 == 0):
if (iters < 10) or (iters % 5 == 0):
# Snapshot the current performance across all tasks after each mini-batch
fbatch = test_task_sequence(model, sess, datasets[0]['test'], class_attr, classes_per_task, task_labels, task)
ftask.append(fbatch)
# Set the output labels over which the model needs to be trained
if model.imp_method == 'A-GEM':
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
else:
logit_mask[:] = 0
if do_sampling:
logit_mask[test_labels] = 1.0
else:
logit_mask[task_labels[task]] = 1.0
if train_single_epoch:
offset = iters * batch_size
if (offset+batch_size <= num_train_examples):
residual = batch_size
else:
residual = num_train_examples - offset
feed_dict = {model.x: train_x[offset:offset+residual], model.y_: train_y[offset:offset+residual],
model.class_attr: masked_class_attrs,
model.sample_weights: task_sample_weights[offset:offset+residual],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
else:
offset = (iters * batch_size) % (num_train_examples - batch_size)
feed_dict = {model.x: train_x[offset:offset+batch_size], model.y_: train_y[offset:offset+batch_size],
model.class_attr: masked_class_attrs,
model.sample_weights: task_sample_weights[offset:offset+batch_size],
model.training_iters: num_iters, model.train_step: iters, model.keep_prob: 0.5,
model.train_phase: True}
if model.imp_method == 'VAN':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'EWC':
feed_dict[model.output_mask] = logit_mask
# If first iteration of the first task then set the initial value of the running fisher
if task == 0 and iters == 0:
sess.run([model.set_initial_running_fisher], feed_dict=feed_dict)
# Update fisher after every few iterations
if (iters + 1) % model.fisher_update_after == 0:
sess.run(model.set_running_fisher)
sess.run(model.reset_tmp_fisher)
_, _, loss = sess.run([model.set_tmp_fisher, model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'PI':
feed_dict[model.output_mask] = logit_mask
_, _, _, loss = sess.run([model.weights_old_ops_grouped, model.train, model.update_small_omega,
model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'MAS':
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train, model.reg_loss], feed_dict=feed_dict)
elif model.imp_method == 'S-GEM':
if task == 0:
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
# Randomly sample a task from the previous tasks
prev_task = np.random.randint(0, task)
# Set the logit mask for the randomly sampled task
logit_mask[:] = 0
logit_mask[task_labels[prev_task]] = 1.0
prev_class_attrs = np.zeros_like(class_attr)
prev_class_attrs[task_labels[prev_task]] = class_attr[task_labels[prev_task]]
# Store the reference gradient
sess.run(model.store_ref_grads, feed_dict={model.x: task_based_memory[prev_task]['images'], model.y_: task_based_memory[prev_task]['labels'],
model.class_attr: prev_class_attrs,
model.keep_prob: 1.0, model.output_mask: logit_mask, model.train_phase: True})
# Compute the gradient for current task and project if need be
logit_mask[:] = 0
logit_mask[task_labels[task]] = 1.0
feed_dict[model.output_mask] = logit_mask
_, loss = sess.run([model.train_subseq_tasks, model.agem_loss], feed_dict=feed_dict)
elif model.imp_method == 'A-GEM':
if task == 0:
a_gem_logit_mask[:] = 0
a_gem_logit_mask[task][task_labels[task]] = 1.0
logit_mask_dict = {m_t: i_t for (m_t, i_t) in zip(model.output_mask, a_gem_logit_mask)}
feed_dict.update(logit_mask_dict)
feed_dict[model.mem_batch_size] = batch_size
# Normal application of gradients
_, loss = sess.run([model.train_first_task, model.agem_loss], feed_dict=feed_dict)
else:
## Compute and store the reference gradients on the previous tasks
# Set the mask for all the previous tasks so far
a_gem_logit_mask[:] = 0
for tt in range(task):
a_gem_logit_mask[tt][task_labels[tt]] = 1.0
if KEEP_EPISODIC_MEMORY_FULL:
mem_sample_mask = np.random.choice(episodic_mem_size, EPS_MEM_BATCH_SIZE, replace=False) # Sample without replacement so that we don't sample an example more than once
else:
if episodic_filled_counter <= EPS_MEM_BATCH_SIZE:
mem_sample_mask = np.arange(episodic_filled_counter)
import numpy as np
import multiprocessing
import sys
import time
import matplotlib.pyplot as plt
# =============================================================================
# Distributed Computing Parameters
pool_size = multiprocessing.cpu_count()
# Genetic Circuit Hyperparameters
NODES = 3000
# Evolutionary Algorithm Hyperparameters
GENERATIONS = 201 # number of generations to run
# Other Hyperparameters
# STEP_MUTATION_RATE = 0.9
# BIG_STEP_MUTATION_RATE = 0.8
# RANDOM_MUTATION_RATE = 1
# SIGN_FLIP_MUTATION_RATE = 0.1
# REG_RATE = 0.0003 # regularization rate
STEP_SIZE = 2.0 # max mutation intensity of each weight
POPULATION = pool_size * 6 # total number of population
SURVIVABLE_PARENTS = POPULATION // 3 # number of parents to survive
# Novelty Search Hyperparameters
# KNN_BC_NUM = 1 # k nearest neighbors number for behavior characteristics
# ARCHIVE_STORING_RATE = 0.01
# ODE
TIME_STEPS = 300
BATCH_SIZE = 30 # Fully divisible by 3 recommended
# Score Constraints
ERROR_BOUND = 0.1 # percentage of error allowed (sigmoid bounds are +-1)
BANDPASS_BOUND = 0.3
# the absolute bound of each weight (very important)
# choose something close to sigmoid saturation is good (eg. 7.5+, 5 is not good, 10 is good)
BOUND = 13
# Parameters (Derived from hyperparameters)
DNA_SIZE = NODES * NODES
UPPER_BANDPASS_BOUND = 1 - BANDPASS_BOUND
COST_UPPER_BOUND = ERROR_BOUND * BATCH_SIZE
# =============================================================================
# Mean normalization
def standardize(population):
# as known as z-score normalization
# the other method being min-max normalization
for i, weights in enumerate(population):
mean = np.mean(weights)
std = np.std(weights)
population[i] = (weights - mean) / std
return population
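# For example, a weight vector [1., 2., 3.] becomes roughly
# [-1.22, 0., 1.22] (zero mean, unit variance) after standardize.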
# =============================================================================
# ODE & Simulations
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# FF Classifier
# Here, only the classical solution determinator is implemented
# def simulate_ode_original(W, N, B, S):
# dt = 0.01
# initial_val = 0.1 * np.ones([B, S]) # can we reuse this?
# input_val = np.linspace(0, 2, B).reshape(B, 1) * np.random.normal(
# loc=1.0, scale=0.0001, size=[N, B, S]) # can we reduce the redundants?
# input_val[:, :, 1:S] = 0.0
# output = initial_val + (
# sigmoid(np.matmul(initial_val, W)) - initial_val + input_val[0]) * dt
# # print(output)
# # HOW: create one time np.linspace(0, 2, B), mutate and reuse in for loop
# for i in range(1, N):
# output = output + (
# sigmoid(np.matmul(output, W)) - output + input_val[i]) * dt
# # print(output)
# return output
# input_initializer = np.linspace(0, 2, BATCH_SIZE).reshape(BATCH_SIZE, 1,)
# input_val[:, 0] = np.linspace(0, 2, BATCH_SIZE).reshape(BATCH_SIZE)
# print(np.random.normal(loc=1.0, scale=0.0001))
dt = 0.01
initial_val = 0.1 * np.ones([BATCH_SIZE, NODES])
input_val = np.zeros((BATCH_SIZE, NODES))
linspace_col = np.linspace(0, 2, BATCH_SIZE).reshape(BATCH_SIZE)
def simulate_ode(W, N, B, S):
# Insert one input and have three outputs
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 2] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, W)) - initial_val + input_val) * dt
)
for i in range(1, N):
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 2] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = output + (sigmoid(np.matmul(output, W)) - output + input_val) * dt
# print(output)
return output
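# simulate_ode integrates dx/dt = sigmoid(x @ W) - x + u(t) with explicit
# Euler steps of size dt = 0.01 for N steps, over the whole batch at once.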
def plot_expressions(y, B):
b = np.linspace(1, B, B)
plt.title(f"{NODES} Nodes")
plt.plot(b, y[:, 0], "black", linewidth=2, label="Input Node #1")
plt.plot(b, y[:, 1], "saddlebrown", linewidth=2, label="Input Node #2")
for i in range(3, y.shape[1] - 1):
# plt.plot(b, y[:, i], 'g-', linewidth=2, label='Support Node')
plt.plot(b, y[:, i], "gray", linewidth=2)
plt.plot(b, y[:, -3], "b", linewidth=2, label="Output Node #3 - Switch")
plt.plot(b, y[:, -2], "g", linewidth=2, label="Output Node #2 - Valley")
plt.plot(b, y[:, -1], "r", linewidth=2, label="Output Node #1 - Bandpass")
plt.xlabel("Input Level")
plt.ylabel("Output Level")
plt.legend()
plt.show()
# =============================================================================
# Behavior characteristic distance mean calculator
# def population_novelty(population):
# pop_novelty = np.zeros(POPULATION)
# bc_distance = np.zeros(POPULATION)
# for i, weights in enumerate(population):
# for j, target in enumerate(population):
# bc_distance[j] = np.linalg.norm(weights - target)
# # only uses KNN_BC_NUM of bc_distance to calculate bc_dist_mean
# bc_distance.sort()
# pop_novelty[i] = np.mean(bc_distance[-KNN_BC_NUM:])
# return pop_novelty
# =============================================================================
# The forever (unforgettable) archive of most novel children in a generation
# Or another method: Prob 1% to store any children to archive
# archive = []
# =============================================================================
# Double mergesort sorting by alist
def double_mergesort(alist, blist):
# print("Splitting ",alist)
if len(alist) > 1:
mid = len(alist) // 2
lefthalf_a = alist[:mid]
lefthalf_b = blist[:mid]
righthalf_a = alist[mid:]
righthalf_b = blist[mid:]
double_mergesort(lefthalf_a, lefthalf_b)
double_mergesort(righthalf_a, righthalf_b)
i = 0
j = 0
k = 0
while i < len(lefthalf_a) and j < len(righthalf_a):
if lefthalf_a[i] < righthalf_a[j]:
alist[k] = lefthalf_a[i]
blist[k] = lefthalf_b[i]
i = i + 1
else:
alist[k] = righthalf_a[j]
blist[k] = righthalf_b[j]
j = j + 1
k = k + 1
while i < len(lefthalf_a):
alist[k] = lefthalf_a[i]
blist[k] = lefthalf_b[i]
i = i + 1
k = k + 1
while j < len(righthalf_a):
alist[k] = righthalf_a[j]
blist[k] = righthalf_b[j]
j = j + 1
k = k + 1
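# Illustrative usage: double_mergesort sorts alist in place and applies the
# same permutation to blist, which is how costs stay paired with individuals
# in select() below.
# costs = [3.0, 1.0, 2.0]
# names = ["c", "a", "b"]
# double_mergesort(costs, names)  # costs -> [1.0, 2.0, 3.0], names -> ["a", "b", "c"]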
# =============================================================================
# Main functions
# Bandpass Determinator
# Determines whether the solution given is a bandpass
# so that you don't need the flags -> faster
def bandpass_determinator(y):
# here we check only one node
    # it would be wise to check the other nodes too, to rule out the classical solution
starting_low_flag = False
middle_high_flag = False
ending_low_flag = False
for pt in y[:, -1]:
if not starting_low_flag:
if pt < BANDPASS_BOUND:
starting_low_flag = True
elif not middle_high_flag:
if pt > UPPER_BANDPASS_BOUND:
middle_high_flag = True
elif not ending_low_flag:
            if pt < BANDPASS_BOUND:  # the tail must come back below the bound
ending_low_flag = True
else:
if pt > BANDPASS_BOUND:
ending_low_flag = False
# print(starting_low_flag, middle_high_flag, ending_low_flag)
return starting_low_flag and middle_high_flag and ending_low_flag
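# Illustrative check (commented out) with a synthetic low-high-low profile on
# the last node:
# y_demo = np.zeros((BATCH_SIZE, NODES))
# y_demo[BATCH_SIZE // 3:2 * BATCH_SIZE // 3, -1] = 1.0
# bandpass_determinator(y_demo)  # -> True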
# Bandpass cost function (for objective-based selection; the lower the better)
# Assumes the number of points is divisible by three
bandpass_design = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.5,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
0.5,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
bandpass_design = np.array(bandpass_design)
def bandpass_cost_calculator(y, B):
cost = np.sum(np.abs(y - bandpass_design))
return cost
def switch_cost_calculator(y, B):
cost = 0
for pt in y[: B // 2]:
cost += np.absolute(pt - 0)
    for pt in y[B // 2 :]:  # fixed loop variable (was `put`, which left `pt` stale)
        cost += np.absolute(1 - pt)
return cost
def linear_cost_calculator(y, B):
B -= 1
cost = 0
for i, pt in enumerate(y):
cost += np.absolute(pt - (i / B))
return cost
peak_design = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.125,
0.25,
0.375,
0.5,
0.625,
0.75,
0.875,
1.0,
1.0,
0.875,
0.75,
0.625,
0.5,
0.375,
0.25,
0.125,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
peak_design = np.array(peak_design)
def peak_cost_calculator(y, B):
# Experiment failed: Made a mountain instead, much easier than bandpass...
cost = np.sum(np.abs(y - peak_design))
return cost
cosine_design = [
1.0,
0.9766205557100867,
0.907575419670957,
0.7960930657056438,
0.6473862847818277,
0.46840844069979015,
0.26752833852922075,
0.05413890858541761,
-0.16178199655276473,
-0.37013815533991445,
-0.5611870653623823,
-0.7259954919231308,
-0.8568571761675893,
-0.9476531711828025,
-0.9941379571543596,
-0.9941379571543596,
-0.9476531711828025,
-0.8568571761675892,
-0.7259954919231307,
-0.5611870653623825,
-0.37013815533991445,
-0.16178199655276476,
0.05413890858541758,
0.267528338529221,
0.4684084406997903,
0.6473862847818279,
0.796093065705644,
0.9075754196709569,
0.9766205557100867,
1.0,
]
cosine_design = np.array(cosine_design)
def cosine_cost_calculator(y, B):
cost = np.sum(np.abs(y - cosine_design))
return cost
# valley_design = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.9458172417006346, 0.7891405093963936, 0.546948158122427, 0.24548548714079924, -0.08257934547233227, -0.40169542465296926, -0.6772815716257409, -0.879473751206489, -0.9863613034027223, -0.9863613034027224, -0.8794737512064891, -0.6772815716257414, -0.40169542465296987, -0.08257934547233274, 0.2454854871407988, 0.5469481581224266, 0.7891405093963934, 0.9458172417006346, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
# valley_design = 1 - bandpass_design
valley_design = 1 - peak_design  # kept active so valley_cost_calculator below has a defined target
def valley_cost_calculator(y, B):
cost = np.sum(np.abs(y - valley_design))
return cost
bandpass_reversed_design = 1 - bandpass_design
def bandpass_reversed_cost_calculator(y, B):
cost = np.sum(np.abs(y - bandpass_reversed_design))
return cost
# def adaptation_cost_calculator(y, B):
# cost = 0
# ADAPTED_LEVEL = 0.1
# for pt in y[:B // 3]:
# cost += np.absolute(pt - 0)
# slice = ((1- ADAPTED_LEVEL) / (B//3))
# for i, pt in enumerate(y[B // 3:2 * B // 3]):
# cost += np.absolute(1 - i * slice) * 3
# print(1 - i * slice)
# sys.exit()
# for pt in y[2 * B // 3:]:
# cost += np.absolute(pt - ADAPTED_LEVEL)
# return cost
adaptation_design = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.5,
1.0,
0.5,
0.25,
0.125,
0.0625,
0.03125,
0.015625,
0.0078125,
0.00390625,
0.001953125,
0.0009765625,
0.00048828125,
0.000244140625,
0.0001220703125,
6.103515625e-05,
3.0517578125e-05,
1.52587890625e-05,
7.62939453125e-06,
3.814697265625e-06,
1.9073486328125e-06,
]
adaptation_design = np.array(adaptation_design)
def adaptation_cost_calculator(y, B):
cost = 0
# for i, pt in enumerate(y):
# cost += np.absolute(pt - adaptation_design[i])
cost = np.sum(np.abs(y - adaptation_design))
return cost
# # def adaptation_cost_calculator(y, B):
# cost = 0
# for pt in y[:B // 3]:
# cost += np.absolute(pt - 0)
# for pt in y[B // 3:2 * B // 3]:
# cost += np.absolute(1 - pt)
# for pt in y[2 * B // 3:]:
# cost += np.absolute(pt - 0.5)
# return cost
# Fitness based
cost_storage = [-1] * POPULATION
# def select(population):
# for i, potential_parent in enumerate(population):
# y = simulate_ode(potential_parent, TIME_STEPS, BATCH_SIZE, NODES)
# # Multiple outputs
# cost_storage[i] = bandpass_cost_calculator(y[:, -1], BATCH_SIZE) * 1.5
# cost_storage[i] += switch_cost_calculator(y[:, -2], BATCH_SIZE) * 1.25
# # cost_storage[i] = adaptation_cost_calculator(y[:, -1], BATCH_SIZE)
# cost_storage[i] += linear_cost_calculator(y[:, -3], BATCH_SIZE)
# cost_storage[i] /= 3
# # cost_storage[i] += REG_RATE * sum(sum(abs(potential_parent))) # regularization
# double_mergesort(cost_storage, population)
# y = simulate_ode(population[0], TIME_STEPS, BATCH_SIZE, NODES)
# print("Bandpass Cost:", bandpass_cost_calculator(y[:, -1], BATCH_SIZE))
# print("Switch Cost:", switch_cost_calculator(y[:, -2], BATCH_SIZE))
# print("Linear Cost:", linear_cost_calculator(y[:, -3], BATCH_SIZE))
# # print(cost_storage[0])
# survivors = population[:SURVIVABLE_PARENTS]
# survivors = np.append(survivors, survivors, axis=0)
# # repopulated_parents = np.append(repopulated_parents, survivors, axis=0)
# # random_children = np.random.uniform(-BOUND, BOUND, (SURVIVABLE_PARENTS, NODES, NODES))
# # survivors = np.append(repopulated_parents, random_children, axis=0)
# # print(repopulated_parents)
# return survivors, population[0], cost_storage[0]
# def select(population):
# # Harmonic Version - Mitigate Impact of Outliers
# for i, potential_parent in enumerate(population):
# y = simulate_ode(potential_parent, TIME_STEPS, BATCH_SIZE, NODES)
# # Multiple outputs
# f_bandpass = BATCH_SIZE - bandpass_cost_calculator(y[:, -1], BATCH_SIZE)
# f_switch = BATCH_SIZE - switch_cost_calculator(y[:, -2], BATCH_SIZE)
# f_linear = BATCH_SIZE - linear_cost_calculator(y[:, -3], BATCH_SIZE)
# cost_storage[i] = BATCH_SIZE - 3 / (((1/f_bandpass) + (1/f_switch) + (1/f_linear)))
# # cost_storage[i] += REG_RATE * sum(sum(abs(potential_parent))) # regularization
# # cost_storage[i] = f_bandpass + f_switch + f_linear
# double_mergesort(cost_storage, population)
# y = simulate_ode(population[0], TIME_STEPS, BATCH_SIZE, NODES)
# print("Bandpass Cost:", bandpass_cost_calculator(y[:, -1], BATCH_SIZE))
# print("Switch Cost:", switch_cost_calculator(y[:, -2], BATCH_SIZE))
# print("Linear Cost:", linear_cost_calculator(y[:, -3], BATCH_SIZE))
# # print(cost_storage[0])
# survivors = population[:SURVIVABLE_PARENTS]
# survivors = np.append(survivors, survivors, axis=0)
# # repopulated_parents = np.append(repopulated_parents, survivors, axis=0)
# # random_children = np.random.uniform(-BOUND, BOUND, (SURVIVABLE_PARENTS, NODES, NODES))
# # survivors = np.append(repopulated_parents, random_children, axis=0)
# # print(repopulated_parents)
# return survivors, population[0], cost_storage[0]
# def select(population):
# # Square Version - Aggravate Impact of Outliers
# for i, potential_parent in enumerate(population):
# y = simulate_ode(potential_parent, TIME_STEPS, BATCH_SIZE, NODES)
# # Multiple outputs
# f_bandpass = bandpass_cost_calculator(y[:, -1], BATCH_SIZE)
# f_bandpass_reversed = bandpass_reversed_cost_calculator(y[:, -2], BATCH_SIZE)
# f_switch = switch_cost_calculator(y[:, -3], BATCH_SIZE)
# # f_valley = valley_cost_calculator(y[:, -3], BATCH_SIZE)
# # f_linear = linear_cost_calculator(y[:, -3], BATCH_SIZE)
# # cost_storage[i] = valley_cost_calculator(y[:, -1], BATCH_SIZE)
# # cost_storage[i] = peak_cost_calculator(y[:, -1], BATCH_SIZE)
# # cost_storage[i] = bandpass_cost_calculator(y[:, -1], BATCH_SIZE)
# cost_storage[i] = f_bandpass**2 + f_switch**2 + f_bandpass_reversed**2
# # cost_storage[i] += REG_RATE * sum(sum(abs(potential_parent))) # regularization
# # cost_storage[i] = f_bandpass + f_switch + f_linear
# double_mergesort(cost_storage, population)
# y = simulate_ode(population[0], TIME_STEPS, BATCH_SIZE, NODES)
# print("Bandpass Cost:", bandpass_cost_calculator(y[:, -1], BATCH_SIZE))
# print("Valley Cost:", bandpass_reversed_cost_calculator(y[:, -2], BATCH_SIZE))
# print("Switch Cost:", switch_cost_calculator(y[:, -3], BATCH_SIZE))
# # print("Valley Cost:", valley_cost_calculator(y[:, -3], BATCH_SIZE))
# # print("Linear Cost:", linear_cost_calculator(y[:, -3], BATCH_SIZE))
# # print(cost_storage[0])
# survivors = population[:SURVIVABLE_PARENTS]
# survivors = np.append(survivors, survivors, axis=0)
# # repopulated_parents = np.append(repopulated_parents, survivors, axis=0)
# # random_children = np.random.uniform(-BOUND, BOUND, (SURVIVABLE_PARENTS, NODES, NODES))
# # survivors = np.append(repopulated_parents, random_children, axis=0)
# # print(repopulated_parents)
# return survivors, population[0], cost_storage[0]
def select(population):
for i, potential_parent in enumerate(population):
f_bandpass = simulate_and_cost_bandpass(potential_parent)
f_bandpass_reversed = simulate_and_cost_bandpass_reversed(potential_parent)
f_switch = simulate_and_cost_switch(potential_parent)
cost_storage[i] = f_bandpass ** 2 + f_bandpass_reversed ** 2 + f_switch ** 2
double_mergesort(cost_storage, population)
survivors = population[:SURVIVABLE_PARENTS]
survivors = np.append(survivors, survivors, axis=0)
return survivors, population[0], cost_storage[0]
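# A minimal generation-loop sketch around select() (commented out; the real
# driver lives elsewhere in this script). The Gaussian mutation operator and
# the GENERATIONS constant are assumptions, and the sketch presumes
# POPULATION == 2 * SURVIVABLE_PARENTS, which select() returning doubled
# survivors implies.
# population = np.random.uniform(-BOUND, BOUND, (POPULATION, NODES, NODES))
# for generation in range(GENERATIONS):
#     population, best_W, best_cost = select(population)
#     population[SURVIVABLE_PARENTS:] += np.random.normal(
#         0.0, 0.1, (SURVIVABLE_PARENTS, NODES, NODES))
#     population = np.clip(population, -BOUND, BOUND)
#     print(generation, best_cost)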
def plot(y):
b = np.linspace(1, BATCH_SIZE, BATCH_SIZE)
plt.title(f"{NODES} Nodes")
plt.plot(b, y[:, 0], "black", linewidth=2, label="Input Node #1")
plt.plot(b, y[:, 1], "saddlebrown", linewidth=2, label="Input Node #2")
for i in range(2, y.shape[1] - 1):
# plt.plot(b, y[:, i], 'g-', linewidth=2, label='Support Node')
plt.plot(b, y[:, i], "gray", linewidth=2)
plt.plot(b, y[:, -1], "r", linewidth=2, label="Multifunction Output Node")
plt.xlabel("Input Level")
plt.ylabel("Output Level")
plt.legend()
plt.show()
def simulate_and_cost_bandpass(individual):
# Encode <- 0, 1
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
cost = np.sum(np.abs(output[:, -1] - bandpass_design))
return cost
def simulate_and_cost_bandpass_reversed(individual):
# Encode <- 1, 0
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
cost = np.sum(np.abs(output[:, -1] - bandpass_reversed_design))
return cost
switch_design = [
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
]
switch_design = np.array(switch_design)
def simulate_and_cost_switch(individual):
# Encode <- 1, 1
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
cost = np.sum(np.abs(output[:, -1] - switch_design))
return cost
def simulate_plot_cost_bandpass(individual):
# Encode <- 0, 1
input_val = np.zeros((BATCH_SIZE, NODES))
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
initial_val
+ (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
)
for i in range(1, TIME_STEPS):
input_val[:, 0] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
output = (
output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
)
plot(output)
def simulate_and_plot_bandpass_reversed(individual):
# Encode <- 1, 0
    input_val = np.zeros((BATCH_SIZE, NODES))
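    # (body restored from the parallel simulate_plot_cost_bandpass above, with
    # the input fed to node 1 instead of node 0)
    input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
    output = (
        initial_val
        + (sigmoid(np.matmul(initial_val, individual)) - initial_val + input_val) * dt
    )
    for i in range(1, TIME_STEPS):
        input_val[:, 1] = linspace_col * np.random.normal(loc=1.0, scale=0.0001)
        output = (
            output + (sigmoid(np.matmul(output, individual)) - output + input_val) * dt
        )
    plot(output)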
# COVID dataset input readers
#
# <EMAIL>, 2020
import sys
import numpy as np
from datetime import datetime,timedelta
from termcolor import colored
import os
import pandas as pd
#from datetime import datetime
#a = datetime.strptime(dt[0], '%Y-%m-%d')
def todiff(series):
"""
Turn cumulative series into differential
"""
series = np.diff(series, prepend=0)
# Fix possible NaN
series[~np.isfinite(series)] = 0
# Fix possible errors in data (cumulative were not monotonic)
ind = series < 0
if np.sum(series[ind]) != 0:
print(colored(f'{__name__}.todiff: fixing non-monotonic input (negative dx set to 0)', 'red'))
print(series)
series[ind] = 0
return series
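# Illustrative usage:
# todiff(np.array([0, 1, 3, 3, 2]))  # -> array([0, 1, 2, 0, 0])
# (the 3 -> 2 drop is a non-monotonic cumulative input, so the negative step
# is zeroed and a warning is printed)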
def data_processor(meta):
"""
Dataset processor wrapper
"""
evalstr = f"{meta['function']}(meta)"
print(evalstr)
try:
d = eval(evalstr)
return d
    except Exception:
print(__name__ + f".data_processor: {colored('Failed to process','yellow')} {meta['isocode']}")
print(f'Error: {sys.exc_info()[0]} {sys.exc_info()[1]}')
def get_isocodes():
isodata = pd.read_csv('./data/iso.csv', comment='#')
code = np.array(isodata['code'])
return code
def get_european_isocodes():
isodata = pd.read_csv('./data/iso.csv', comment='#')
code = np.array(isodata['code'])
continent = np.array(isodata['continent'])
return code[continent == 4] # Europe only
def data_reader_swiss(meta):
"""
Swiss data format reader
"""
# --------------------------------------------------------------------
# DEATHS
df = pd.read_csv('./data/' + meta['filename_deaths'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
d = {}
d['dt'] = np.array(df["Date"])
# Turn cumulative into daily
d['deaths'] = todiff(df[meta['region']])
# --------------------------------------------------------------------
# Cases
df = pd.read_csv('./data/' + meta['filename_cases'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
# Turn cumulative into daily
d['cases'] = todiff(df[meta['region']])
# --------------------------------------------------------------------
# Tests
df = pd.read_csv('./data/' + meta['filename_tested'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
# Turn cumulative into daily
d['tests'] = todiff(df[meta['region']])
# --------------------------------------------------------------------
d['population'] = meta['population']
d['isocode'] = meta['isocode']
# --------------------------------------------------------------------
if (len(d['deaths']) != len(d['cases'])):
raise Exception(__name__ + '.data_reader_swiss: len(deaths) != len(cases)')
if (len(d['cases']) != len(d['tests'])):
raise Exception(__name__ + '.data_reader_swiss: len(cases) != len(tests)')
return d
def data_reader_sweden(meta):
d = {}
d['isocode'] = meta['isocode']
d['population'] = meta['population']
df = pd.read_csv('./data/' + meta['filename_cases'], comment='#')
df = df.loc[df["Region"] == meta['region']]
# --------------------------------------------------------------------
# Iterating the columns, find date columns
dt=list()
for col in df.columns:
if "2020-" in col:
dt.append(col)
d['dt'] = dt
# --------------------------------------------------------------------
# Cases
d['cases'] = np.array(df[dt])[0]
# --------------------------------------------------------------------
# Deaths
df = pd.read_csv('./data/' + meta['filename_deaths'], comment='#')
df = df.loc[df["Region"] == meta['region']]
d['deaths'] = np.array(df[dt])[0]
# --------------------------------------------------------------------
# Tests
# ** NOT AVAILABLE **
d['tests'] = np.zeros(len(dt))*np.nan
return d
def data_reader_usa(meta):
d = {}
d['population'] = meta['population']
d['isocode'] = meta['isocode']
# --------------------------------------------------------------------
# Deaths
df = pd.read_csv('./data/' + meta['filename'], comment='#')
df = df.loc[df["county"] == meta['region']]
d['dt'] = np.array(df['date'])
d['deaths'] = todiff(df['deaths'])
# --------------------------------------------------------------------
# Cases
d['cases'] = todiff(df['cases'])
# --------------------------------------------------------------------
# Tests
d['tests'] = np.zeros(len(d['dt']))*np.nan
return d
def data_reader_heinsberg(meta):
d = {}
d['population'] = meta['population']
d['isocode'] = meta['isocode']
# Cases data
#df = pd.read_csv('./data/' + meta['filename_cases'], comment='#')
#data = df.loc[df["county"] == meta['region']]
# --------------------------------------------------------------------
# Deaths
df = pd.read_csv('./data/' + meta['filename_deaths'], comment='#')
d['dt'] = np.array(df['date'])
d['deaths'] = np.array(df['deaths'])
# --------------------------------------------------------------------
# Cases
d['cases'] = np.zeros(len(d['dt']))*np.nan
# --------------------------------------------------------------------
# Tests
d['tests'] = np.zeros(len(d['dt']))*np.nan
return d
def data_reader_florida(meta):
d = {}
d['population'] = meta['population']
d['isocode'] = meta['isocode']
# Cases data
#df = pd.read_csv('./data/' + meta['filename_cases'], comment='#')
#data = df.loc[df["county"] == meta['region']]
# --------------------------------------------------------------------
# Deaths
df = pd.read_csv('./data/' + meta['filename_deaths'], comment='#')
d['dt'] = np.array(df['date'])
d['deaths'] = np.array(df['deaths'])
# --------------------------------------------------------------------
# Cases
d['cases'] = np.zeros(len(d['dt']))*np.nan #np.array(data["frequency"])
# --------------------------------------------------------------------
# Tests
d['tests'] = np.zeros(len(d['dt']))*np.nan
return d
def data_reader_LA(meta):
"""
LA County data format reader
"""
df = pd.read_csv('./data/' + meta['filename'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
d = {}
d['dt'] = np.array(df["date_dt"])
d['cases'] = np.array(df["new_case"])
d['deaths'] = np.array(df["new_deaths"])
d['tests'] = np.array(df['new_persons_tested'])
d['population'] = meta['population']
d['isocode'] = meta['isocode']
return d
def data_reader_OWID(meta):
"""
World-in-data format reader
"""
df = pd.read_csv('./data/' + meta['filename'], comment='#')
df = df.sort_index(ascending=meta['ascending'], axis=0)
# Take the isocode
data = df.loc[df["iso_code"] == meta['isocode']]
d = {}
d['dt'] = np.array(data["date"])
d['cases'] = np.array(data["new_cases"])
d['deaths'] = np.array(data["new_deaths"])
    d['tests'] = np.array(data["new_tests_smoothed"])
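    return d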
#######################################################
#Reference: https://github.com/experiencor/keras-yolo3#
#######################################################
import numpy as np
import os
import cv2
from scipy.special import expit
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, c = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.c = c
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def get_box(self):
return (self.xmin, self.ymin, self.xmax, self.ymax)
def _sigmoid(x):
return expit(x)
def _softmax(x, axis=-1):
x = x - np.amax(x, axis, keepdims=True)
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
def preprocess_input(img, w, h):
ih, iw, _ = img.shape
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image_data = cv2.resize(img, (nw,nh))
new_image = np.full((h,w,3), (128,128,128), dtype='uint8')
new_image[(h-nh)//2 : (h+nh)//2, (w-nw)//2:(w+nw)//2] = image_data
image_data = new_image.astype('float')/255.0
image_data = image_data[np.newaxis, ...]
return image_data
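# Illustrative usage (hypothetical file name; 416x416 is the usual YOLOv3 input):
# img = cv2.imread("example.jpg")
# batch = preprocess_input(img, 416, 416)  # letterboxed, shape (1, 416, 416, 3)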
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4] = _sigmoid(netout[..., 4])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:])
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i // grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[row, col, b, 4]
if(objectness <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[row,col,b,:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[row,col,b,5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
boxes.append(box)
return boxes
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return []
for c in range(nb_class):
        sorted_indices = np.argsort([-box.classes[c] for box in boxes])
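        # Completion sketch: the suppression loop from the reference repo cited
        # at the top of this file, with the usual interval-overlap IoU helpers.
        for i in range(len(sorted_indices)):
            index_i = sorted_indices[i]
            if boxes[index_i].classes[c] == 0:
                continue
            for j in range(i + 1, len(sorted_indices)):
                index_j = sorted_indices[j]
                # zero out the lower-scored box's class score if overlap is high
                if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
                    boxes[index_j].classes[c] = 0
    return boxes
def _interval_overlap(interval_a, interval_b):
    # overlap length of the 1-D intervals [x1, x2] and [x3, x4]
    x1, x2 = interval_a
    x3, x4 = interval_b
    if x3 < x1:
        return 0 if x4 < x1 else min(x2, x4) - x1
    return 0 if x2 < x3 else min(x2, x4) - x3
def bbox_iou(box1, box2):
    intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
    intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
    intersect = intersect_w * intersect_h
    w1, h1 = box1.xmax - box1.xmin, box1.ymax - box1.ymin
    w2, h2 = box2.xmax - box2.xmin, box2.ymax - box2.ymin
    union = w1 * h1 + w2 * h2 - intersect
    return float(intersect) / union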
'''Module for additional computations required by the model'''
from numpy import (
arange, array, atleast_2d, concatenate, copy, cumprod, diag, isnan, ix_,
ones, shape, sum, where, zeros)
from numpy import int64 as my_int
import pdb
from scipy.sparse import csc_matrix as sparse
from model.imports import NoImportModel
from model.subsystems import subsystem_key
def state_recursor(
states,
no_compartments,
age_class,
b_size,
n_blocks,
con_reps,
c,
x,
depth,
k):
if depth < no_compartments-1:
for x_i in arange(c + 1 - x.sum()):
x[0, depth] = x_i
x[0, depth+1:] = zeros(
(1, no_compartments-depth-1),
dtype=my_int)
states, k = state_recursor(
states,
no_compartments,
age_class,
b_size,
n_blocks,
con_reps,
c,
x,
depth+1,
k)
else:
x[0, -1] = c - sum(x[0, :depth])
for block in arange(n_blocks):
repeat_range = arange(
block * b_size
+ k * con_reps,
block * b_size +
(k + 1) * con_reps)
states[repeat_range, no_compartments*age_class:no_compartments*(age_class+1)] = \
ones(
(con_reps, 1),
dtype=my_int) \
* array(
x,
ndmin=2, dtype=my_int)
k += 1
return states, k
return states, k
def build_states_recursively(
total_size,
no_compartments,
classes_present,
block_size,
num_blocks,
consecutive_repeats,
composition):
states = zeros(
(total_size, no_compartments*len(classes_present)),
dtype=my_int)
for age_class in range(len(classes_present)):
k = 0
states, k = state_recursor(
states,
no_compartments,
age_class,
block_size[age_class],
num_blocks[age_class],
consecutive_repeats[age_class],
composition[classes_present[age_class]],
zeros([1, no_compartments], dtype=my_int),
0,
k)
return states, k
def build_state_matrix(household_spec):
# Number of times you repeat states for each configuration
consecutive_repeats = concatenate((
ones(1, dtype=my_int), cumprod(household_spec.system_sizes[:-1])))
block_size = consecutive_repeats * household_spec.system_sizes
num_blocks = household_spec.total_size // block_size
states, k = build_states_recursively(
household_spec.total_size,
household_spec.no_compartments,
household_spec.class_indexes,
block_size,
num_blocks,
consecutive_repeats,
household_spec.composition)
# Now construct a sparse vector which tells you which row a state appears
# from in the state array
# This loop tells us how many values each column of the state array can
# take
state_sizes = concatenate([
(household_spec.composition[i] + 1)
* ones(household_spec.no_compartments, dtype=my_int)
for i in household_spec.class_indexes]).ravel()
# This vector stores the number of combinations you can get of all
# subsequent elements in the state array, i.e. reverse_prod(i) tells you
# how many arrangements you can get in states(:,i+1:end)
reverse_prod = array([0, *cumprod(state_sizes[:0:-1])])[::-1]
# We can then define index_vector look up the location of a state by
# weighting its elements using reverse_prod - this gives a unique mapping
# from the set of states to the integers. Because lots of combinations
# don't actually appear in the states array, we use a sparse array which
# will be much bigger than we actually require
rows = [
states[k, :].dot(reverse_prod) + states[k, -1]
for k in range(household_spec.total_size)]
if min(rows) < 0:
print(
'Negative row indices found, proportional total',
sum(array(rows) < 0),
'/',
len(rows),
'=',
sum(array(rows) < 0) / len(rows))
index_vector = sparse((
arange(household_spec.total_size),
(rows, [0]*household_spec.total_size)))
return states, reverse_prod, index_vector, rows
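# Worked example of the index scheme above (illustrative numbers): if
# state_sizes = [4, 3, 2], then state_sizes[:0:-1] = [2, 3], its cumprod is
# [2, 6], and reverse_prod = [6, 2, 0]. A state (a, b, c) therefore maps to
# the key 6*a + 2*b + 0*c + c = 6*a + 2*b + c -- a unique mixed-radix integer,
# which is why rows can be looked up through the sparse index_vector.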
def within_household_spread(
composition, model_input):
'''Assuming frequency-dependent homogeneous within-household mixing
composition[i] is the number of individuals in age-class i inside the
household'''
sus = model_input.sus
det = model_input.det
tau = model_input.tau
K_home = model_input.k_home
alpha = model_input.alpha
gamma = model_input.gamma
# Set of individuals actually present here
classes_present = where(composition.ravel() > 0)[0]
K_home = K_home[ix_(classes_present, classes_present)]
sus = sus[classes_present]
det = det[classes_present]
tau = tau[classes_present]
r_home = atleast_2d(diag(sus).dot(K_home))
states, total_size, reverse_prod, index_vector, rows = build_state_matrix(composition, classes_present, 5)
d_pos = 2 + 5 * arange(len(classes_present))
u_pos = 3 + 5 * arange(len(classes_present))
Q_int = sparse((total_size, total_size))
inf_event_row = array([], dtype=my_int)
inf_event_col = array([], dtype=my_int)
inf_event_class = array([], dtype=my_int)
# Add events for each age class
for i in range(len(classes_present)):
s_present = where(states[:, 5*i] > 0)[0]
e_present = where(states[:, 5*i+1] > 0)[0]
d_present = where(states[:, 5*i+2] > 0)[0]
u_present = where(states[:, 5*i+3] > 0)[0]
# First do infection events
inf_to = zeros(len(s_present), dtype=my_int)
inf_rate = zeros(len(s_present))
for k in range(len(s_present)):
old_state = copy(states[s_present[k], :])
inf_rate[k] = old_state[5*i] * (
r_home[i, :].dot(
(old_state[d_pos] / composition[classes_present])
+ (old_state[u_pos] / composition[classes_present]) * tau))
new_state = old_state.copy()
new_state[5*i] -= 1
new_state[5*i + 1] += 1
inf_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(inf_rate, (s_present, inf_to)),
shape=(total_size, total_size))
inf_event_row = concatenate((inf_event_row, s_present))
inf_event_col = concatenate((inf_event_col, inf_to))
inf_event_class = concatenate(
(inf_event_class, classes_present[i]*ones((len(s_present)))))
# input('Press enter to continue')
# # disp('Infection events done')
# # Now do exposure to detected or undetected
det_to = zeros(len(e_present), dtype=my_int)
det_rate = zeros(len(e_present))
undet_to = zeros(len(e_present), dtype=my_int)
undet_rate = zeros(len(e_present))
for k in range(len(e_present)):
# First do detected
old_state = copy(states[e_present[k], :])
det_rate[k] = det[i] * alpha * old_state[5*i+1]
new_state = copy(old_state)
new_state[5*i + 1] -= 1
new_state[5*i + 2] += 1
det_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
            # Then do undetected
undet_rate[k] = (1.0 - det[i]) * alpha * old_state[5*i+1]
new_state = copy(old_state)
new_state[5*i + 1] -= 1
new_state[5*i + 3] += 1
undet_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(det_rate, (e_present, det_to)),
shape=(total_size, total_size))
Q_int += sparse(
(undet_rate, (e_present, undet_to)),
shape=(total_size, total_size))
        # # disp('Incubation events done')
# Now do recovery of detected cases
rec_to = zeros(len(d_present), dtype=my_int)
rec_rate = zeros(len(d_present))
for k in range(len(d_present)):
old_state = copy(states[d_present[k], :])
rec_rate[k] = gamma * old_state[5*i+2]
new_state = copy(old_state)
new_state[5*i+2] -= 1
new_state[5*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(rec_rate, (d_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from detecteds done')
# Now do recovery of undetected cases
rec_to = zeros(len(u_present), dtype=my_int)
rec_rate = zeros(len(u_present))
for k in range(len(u_present)):
old_state = copy(states[u_present[k], :])
rec_rate[k] = gamma*old_state[5*i+3]
new_state = copy(old_state)
new_state[5*i+3] -= 1
new_state[5*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) +new_state[-1], 0]
Q_int = Q_int + sparse(
(rec_rate, (u_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from undetecteds done')
S = Q_int.sum(axis=1).getA().squeeze()
Q_int += sparse((
-S, (arange(total_size), arange(total_size))))
return \
Q_int, states, \
array(inf_event_row, dtype=my_int, ndmin=1), \
array(inf_event_col, dtype=my_int, ndmin=1), \
array(inf_event_class, dtype=my_int, ndmin=1)
def within_household_SEDURQ(
composition, model_input):
'''Assuming frequency-dependent homogeneous within-household mixing
composition[i] is the number of individuals in age-class i inside the
household'''
sus = model_input.sigma
det = model_input.det
tau = model_input.tau
K_home = model_input.k_home
alpha = model_input.alpha
gamma = model_input.gamma
D_iso_rate = model_input.D_iso_rate
U_iso_rate = model_input.U_iso_rate
discharge_rate = model_input.discharge_rate
adult_bd = model_input.adult_bd
class_is_isolating = model_input.class_is_isolating
# Set of individuals actually present here
classes_present = where(composition.ravel() > 0)[0]
# Check number of adults and whether children_present
no_adults = sum(composition[adult_bd:])
children_present = sum(composition[:adult_bd])>0
K_home = K_home[ix_(classes_present, classes_present)]
sus = sus[classes_present]
det = det[classes_present]
tau = tau[classes_present]
r_home = atleast_2d(diag(sus).dot(K_home))
states, total_size, reverse_prod, index_vector, rows = build_state_matrix(composition, classes_present, 6)
d_pos = 2 + 6 * arange(len(classes_present))
u_pos = 3 + 6 * arange(len(classes_present))
iso_pos = 5 + 6 * arange(len(classes_present))
iso_adjusted_comp = composition[classes_present] - states[:,iso_pos] # This is number of people of each age class present in the household given some may isolate
iso_adjusted_comp[iso_adjusted_comp==0] = 1 # Replace zeros with ones - we only ever use this as a denominator whose numerator will be zero anyway if it should be zero
if (iso_adjusted_comp<1).any():
pdb.set_trace()
adults_isolating = states[:,6*adult_bd+5::6].sum(axis=1) # Number of adults isolating by state
Q_int = sparse((total_size, total_size))
inf_event_row = array([], dtype=my_int)
inf_event_col = array([], dtype=my_int)
inf_event_class = array([], dtype=my_int)
# Add events for each age class
for i in range(len(classes_present)):
s_present = where(states[:, 6*i] > 0)[0]
e_present = where(states[:, 6*i+1] > 0)[0]
d_present = where(states[:, 6*i+2] > 0)[0]
u_present = where(states[:, 6*i+3] > 0)[0]
# First do infection events
inf_to = zeros(len(s_present), dtype=my_int)
inf_rate = zeros(len(s_present))
for k in range(len(s_present)):
old_state = copy(states[s_present[k], :])
            # index by the state's row (s_present[k]), not the loop counter k
            inf_rate[k] = old_state[6*i] * (
                r_home[i, :].dot(
                    (old_state[d_pos] / iso_adjusted_comp[s_present[k]])
                    + (old_state[u_pos] / iso_adjusted_comp[s_present[k]]) * tau))
new_state = old_state.copy()
new_state[6*i] -= 1
new_state[6*i + 1] += 1
inf_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(inf_rate, (s_present, inf_to)),
shape=(total_size, total_size))
inf_event_row = concatenate((inf_event_row, s_present))
inf_event_col = concatenate((inf_event_col, inf_to))
inf_event_class = concatenate(
(inf_event_class, classes_present[i]*ones((len(s_present)))))
# input('Press enter to continue')
# # disp('Infection events done')
# # Now do exposure to detected or undetected
det_to = zeros(len(e_present), dtype=my_int)
det_rate = zeros(len(e_present))
undet_to = zeros(len(e_present), dtype=my_int)
undet_rate = zeros(len(e_present))
for k in range(len(e_present)):
# First do detected
old_state = copy(states[e_present[k], :])
det_rate[k] = det[i] * alpha * old_state[6*i+1]
new_state = copy(old_state)
new_state[6*i + 1] -= 1
new_state[6*i + 2] += 1
det_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
            # Then do undetected
undet_rate[k] = (1.0 - det[i]) * alpha * old_state[6*i+1]
new_state = copy(old_state)
new_state[6*i + 1] -= 1
new_state[6*i + 3] += 1
undet_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(det_rate, (e_present, det_to)),
shape=(total_size, total_size))
Q_int += sparse(
(undet_rate, (e_present, undet_to)),
shape=(total_size, total_size))
        # # disp('Incubation events done')
# Now do recovery of detected cases
rec_to = zeros(len(d_present), dtype=my_int)
rec_rate = zeros(len(d_present))
for k in range(len(d_present)):
old_state = copy(states[d_present[k], :])
rec_rate[k] = gamma * old_state[6*i+2]
new_state = copy(old_state)
new_state[6*i+2] -= 1
new_state[6*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(rec_rate, (d_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from detecteds done')
# Now do recovery of undetected cases
rec_to = zeros(len(u_present), dtype=my_int)
rec_rate = zeros(len(u_present))
for k in range(len(u_present)):
old_state = copy(states[u_present[k], :])
rec_rate[k] = gamma*old_state[6*i+3]
new_state = copy(old_state)
new_state[6*i+3] -= 1
new_state[6*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) +new_state[-1], 0]
Q_int = Q_int + sparse(
(rec_rate, (u_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from undetecteds done')
#Now do isolation
if (class_is_isolating[i,classes_present]).any():
if (i<adult_bd) or not children_present: # If i is a child class or there are no children around, anyone can isolate
d_can_isolate = d_present
u_can_isolate = u_present
else: # If children are present adults_isolating must stay below no_adults-1 so the children still have a guardian
d_can_isolate = where((states[:, 6*i+2] > 0)*(adults_isolating<no_adults-1))[0]
u_can_isolate = where((states[:, 6*i+3] > 0)*(adults_isolating<no_adults-1))[0]
iso_present = where(states[:, 6*i+5] > 0)[0]
# Isolation of detected cases
iso_to = zeros(len(d_can_isolate), dtype=my_int)
iso_rate = zeros(len(d_can_isolate))
for k in range(len(d_can_isolate)):
old_state = copy(states[d_can_isolate[k], :])
iso_rate[k] = D_iso_rate * old_state[6*i+2]
new_state = copy(old_state)
new_state[6*i+2] -= 1
new_state[6*i+5] += 1
iso_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(iso_rate, (d_can_isolate, iso_to)),
shape=(total_size, total_size))
# Isolation of undetected cases
iso_to = zeros(len(u_can_isolate), dtype=my_int)
iso_rate = zeros(len(u_can_isolate))
for k in range(len(u_can_isolate)):
old_state = copy(states[u_can_isolate[k], :])
iso_rate[k] = U_iso_rate * old_state[6*i+3]
new_state = copy(old_state)
new_state[6*i+3] -= 1
new_state[6*i+5] += 1
iso_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(iso_rate, (u_can_isolate, iso_to)),
shape=(total_size, total_size))
# Return home of isolated cases
return_to = zeros(len(iso_present), dtype=my_int)
return_rate = zeros(len(iso_present))
for k in range(len(iso_present)):
old_state = copy(states[iso_present[k], :])
return_rate[k] = discharge_rate * old_state[6*i+5]
new_state = copy(old_state)
new_state[6*i+5] -= 1
new_state[6*i+4] += 1
return_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(return_rate, (iso_present, return_to)),
shape = (total_size,total_size))
S = Q_int.sum(axis=1).getA().squeeze()
Q_int += sparse((
-S, (arange(total_size), arange(total_size))))
return \
Q_int, states, \
array(inf_event_row, dtype=my_int, ndmin=1), \
array(inf_event_col, dtype=my_int, ndmin=1), \
array(inf_event_class, dtype=my_int, ndmin=1)
def within_household_SEPIRQ(
composition, model_input):
'''Assuming frequency-dependent homogeneous within-household mixing
composition[i] is the number of individuals in age-class i inside the
household'''
sus = model_input.sus
tau = model_input.tau
K_home = model_input.k_home
alpha_1 = model_input.alpha_1
alpha_2 = model_input.alpha_2
gamma = model_input.gamma
E_iso_rate = model_input.E_iso_rate
P_iso_rate = model_input.P_iso_rate
I_iso_rate = model_input.I_iso_rate
discharge_rate = model_input.discharge_rate
adult_bd = model_input.adult_bd
class_is_isolating = model_input.class_is_isolating
iso_method = model_input.iso_method # Set to 0 if isolating externaly, 1 if isolating internally
tau_Q = (tau/alpha_2 + 1/gamma)/(1/alpha_1+1/alpha_2+1/gamma) # Scaling for infection from quarantined cases
# Set of individuals actually present here
classes_present = where(composition.ravel() > 0)[0]
# Check number of adults and whether children_present
no_adults = sum(composition[adult_bd:])
children_present = sum(composition[:adult_bd])>0
K_home = K_home[ix_(classes_present, classes_present)]
sus = sus[classes_present]
tau = tau[classes_present]
tau_Q = tau_Q[classes_present]
r_home = atleast_2d(diag(sus).dot(K_home))
states, total_size, reverse_prod, index_vector, rows = build_state_matrix(composition, classes_present, 6)
p_pos = 2 + 6 * arange(len(classes_present))
i_pos = 3 + 6 * arange(len(classes_present))
iso_pos = 5 + 6 * arange(len(classes_present))
iso_adjusted_comp = composition[classes_present] - (1-iso_method)*states[:,iso_pos] # This is number of people of each age class present in the household given some may isolate
iso_adjusted_comp[iso_adjusted_comp==0] = 1 # Replace zeros with ones - we only ever use this as a denominator whose numerator will be zero anyway if it should be zero
if (iso_adjusted_comp<1).any():
pdb.set_trace()
adults_isolating = states[:,6*adult_bd+5::6].sum(axis=1) # Number of adults isolating by state
Q_int = sparse((total_size, total_size))
inf_event_row = array([], dtype=my_int)
inf_event_col = array([], dtype=my_int)
inf_event_class = array([], dtype=my_int)
# Add events for each age class
for i in range(len(classes_present)):
s_present = where(states[:, 6*i] > 0)[0]
e_present = where(states[:, 6*i+1] > 0)[0]
p_present = where(states[:, 6*i+2] > 0)[0]
i_present = where(states[:, 6*i+3] > 0)[0]
# First do infection events
inf_to = zeros(len(s_present), dtype=my_int)
inf_rate = zeros(len(s_present))
for k in range(len(s_present)):
old_state = copy(states[s_present[k], :])
            # index by the state's row (s_present[k]), not the loop counter k
            inf_rate[k] = old_state[6*i] * (
                r_home[i, :].dot(
                    (old_state[i_pos] / iso_adjusted_comp[s_present[k]])
                    + (old_state[p_pos] / iso_adjusted_comp[s_present[k]]) * tau # tau is prodromal reduction
                    + iso_method*(old_state[iso_pos] / iso_adjusted_comp[s_present[k]]) * tau_Q)) # if we are doing internal isolation we scale down by tau_Q
new_state = old_state.copy()
new_state[6*i] -= 1
new_state[6*i + 1] += 1
inf_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(inf_rate, (s_present, inf_to)),
shape=(total_size, total_size))
inf_event_row = concatenate((inf_event_row, s_present))
inf_event_col = concatenate((inf_event_col, inf_to))
inf_event_class = concatenate(
(inf_event_class, classes_present[i]*ones((len(s_present)))))
# input('Press enter to continue')
# # disp('Infection events done')
# # Now do exposure to prodromal
inc_to = zeros(len(e_present), dtype=my_int)
inc_rate = zeros(len(e_present))
for k in range(len(e_present)):
# First do detected
old_state = copy(states[e_present[k], :])
inc_rate[k] = alpha_1 * old_state[6*i+1]
new_state = copy(old_state)
new_state[6*i + 1] -= 1
new_state[6*i + 2] += 1
inc_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(inc_rate, (e_present, inc_to)),
shape=(total_size, total_size))
        # # disp('Incubation events done')
# # Now do prodromal to infectious
dev_to = zeros(len(p_present), dtype=my_int)
dev_rate = zeros(len(p_present))
for k in range(len(p_present)):
# First do detected
old_state = copy(states[p_present[k], :])
dev_rate[k] = alpha_2 * old_state[6*i+2]
new_state = copy(old_state)
new_state[6*i + 2] -= 1
new_state[6*i + 3] += 1
dev_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(dev_rate, (p_present, dev_to)),
shape=(total_size, total_size))
# Now do recovery of detected cases
rec_to = zeros(len(i_present), dtype=my_int)
rec_rate = zeros(len(i_present))
for k in range(len(i_present)):
old_state = copy(states[i_present[k], :])
rec_rate[k] = gamma * old_state[6*i+3]
new_state = copy(old_state)
new_state[6*i+3] -= 1
new_state[6*i+4] += 1
rec_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(rec_rate, (i_present, rec_to)),
shape=(total_size, total_size))
# disp('Recovery events from detecteds done')
#Now do isolation
if (class_is_isolating[classes_present[i],classes_present]).any(): # This checks whether class i is meant to isolated and whether any of the vulnerable classes are present
# if not classes_present[i]==1:
# pdb.set_trace()
if iso_method==1 or (i<adult_bd) or not children_present: # If isolating internally, i is a child class, or there are no children around, anyone can isolate
e_can_isolate = where((states[:, 6*i+1] > 0)*(states[:, 6*i+5] == 0))[0]
p_can_isolate = where((states[:, 6*i+2] > 0)*(states[:, 6*i+5] == 0))[0]
i_can_isolate = where((states[:, 6*i+3] > 0)*(states[:, 6*i+5] == 0))[0]
else: # If children are present adults_isolating must stay below no_adults-1 so the children still have a guardian
e_can_isolate = where((states[:, 6*i+1] > 0)*(adults_isolating<no_adults-1))[0]
p_can_isolate = where((states[:, 6*i+2] > 0)*(adults_isolating<no_adults-1))[0]
i_can_isolate = where((states[:, 6*i+3] > 0)*(adults_isolating<no_adults-1))[0]
iso_present = where(states[:, 6*i+5] > 0)[0]
# Isolation of incubating cases
iso_to = zeros(len(e_can_isolate), dtype=my_int)
iso_rate = zeros(len(e_can_isolate))
for k in range(len(e_can_isolate)):
old_state = copy(states[e_can_isolate[k], :])
iso_rate[k] = E_iso_rate * old_state[6*i+1]
new_state = copy(old_state)
new_state[6*i+1] -= 1
new_state[6*i+5] += 1
iso_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(iso_rate, (e_can_isolate, iso_to)),
shape=(total_size, total_size))
# Isolation of prodromal cases
iso_to = zeros(len(p_can_isolate), dtype=my_int)
iso_rate = zeros(len(p_can_isolate))
for k in range(len(p_can_isolate)):
old_state = copy(states[p_can_isolate[k], :])
iso_rate[k] = P_iso_rate * old_state[6*i+2]
new_state = copy(old_state)
new_state[6*i+2] -= 1
new_state[6*i+5] += 1
iso_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(iso_rate, (p_can_isolate, iso_to)),
shape=(total_size, total_size))
# Isolation of fully infectious cases
iso_to = zeros(len(i_can_isolate), dtype=my_int)
iso_rate = zeros(len(i_can_isolate))
for k in range(len(i_can_isolate)):
old_state = copy(states[i_can_isolate[k], :])
iso_rate[k] = I_iso_rate * old_state[6*i+3]
new_state = copy(old_state)
new_state[6*i+3] -= 1
new_state[6*i+5] += 1
iso_to[k] = index_vector[
new_state.dot(reverse_prod) + new_state[-1], 0]
Q_int += sparse(
(iso_rate, (i_can_isolate, iso_to)),
shape=(total_size, total_size))
# Return home of isolated cases
return_to = zeros(len(iso_present), dtype=my_int)
return_rate = zeros(len(iso_present))
for k in range(len(iso_present)):
                old_state = copy(states[iso_present[k], :])
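                # (remainder restored from the parallel return-home loop in
                # within_household_SEDURQ above)
                return_rate[k] = discharge_rate * old_state[6*i+5]
                new_state = copy(old_state)
                new_state[6*i+5] -= 1
                new_state[6*i+4] += 1
                return_to[k] = index_vector[
                    new_state.dot(reverse_prod) + new_state[-1], 0]
            Q_int += sparse(
                (return_rate, (iso_present, return_to)),
                shape=(total_size, total_size))
    S = Q_int.sum(axis=1).getA().squeeze()
    Q_int += sparse((
        -S, (arange(total_size), arange(total_size))))
    return \
        Q_int, states, \
        array(inf_event_row, dtype=my_int, ndmin=1), \
        array(inf_event_col, dtype=my_int, ndmin=1), \
        array(inf_event_class, dtype=my_int, ndmin=1)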
#!/usr/bin/env python
import argparse
import os
import numpy as np
from tqdm import tqdm
import sys
sys.path.append("../models")
from dataset import CowcDataset_Counting
def compute_histogram(dataset):
hist = np.zeros(shape=[10**3, ], dtype=int)
for image, label in tqdm(dataset):
hist[label] += 1
car_count_max = np.where(hist > 0)[0][-1]
return hist[:car_count_max + 1]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute images mean array')
parser.add_argument('--data-list', help='Path to training image-label list file',
default='../../data/cowc_processed/train_val/crop/train.txt')
parser.add_argument('--root', help='Root directory path of image files',
default='../../data/cowc_processed/train_val/crop/train')
parser.add_argument('--output', help='path to output distriba array',
default='../../data/cowc_processed/train_val/crop/histogram.npy')
parser.add_argument('--crop-size', type=int, help='Crop size in px',
default=96)
args = parser.parse_args()
dataset = CowcDataset_Counting(args.data_list, args.root, args.crop_size)
hist = compute_histogram(dataset)
print("Computed histogram:")
print("car_num, count")
for car_num, count in enumerate(hist):
print("{}, {}".format(car_num, count))
    np.save(args.output, hist)
# Identification of donor abundance in bulk sample
import numpy as np
__docformat__ = "restructuredtext en"
__all__ = ['VireoBulk']
class VireoBulk():
"""
    Estimate of donor abundance in a multiplexed bulk sample
    Variables to infer
-----------------
psi: numpy.array (n_donor, )
The fractional abundance of each donor in the mixture
theta: numpy.array (n_GT, )
The alternative allele rate in each genotype category
Parameters
----------
n_GT: int, number of genotype categories
n_donor: int, number of donors in the mixture
"""
def __init__(self, n_donor, n_GT=3, psi_init=None,
theta_init=[0.01, 0.5, 0.99]):
self.n_GT = n_GT
self.n_donor = n_donor
self.psi = np.random.dirichlet([1] * n_donor)
self.theta = np.random.rand(n_GT)
if psi_init is not None:
if n_donor != len(psi_init):
print("Warning: n_donor != len(psi_init)")
else:
self.psi = np.random.dirichlet([1] * n_donor)
if theta_init is not None:
if n_GT != len(theta_init):
print("Warning: n_GT != len(theta_init)")
else:
self.theta = theta_init
def fit(self, AD, DP, GT_prob, max_iter=200, min_iter=5, epsilon_conv=1e-3,
learn_theta=True, delay_fit_theta=0, model="EM", verbose=False):
"""Fit the unknown variable psi and theta with EM algorithm
Parameters
----------
AD: numpy.array, (n_variant, ), int
The count vector for alternative allele in all variants
DP: numpy.array (n_variant, ), int
The count vector for depths in all variants (i.e., two alleles)
GT_prob: numpy.array, (n_variants, n_donor, n_GT)
The probability tensor for each genotype in each donor
learn_theta: bool
Whether learn theta, otherwise use theta_init
delay_fit_theta: int
The number of steps to delay in updating theta
max_iter : int
Maximum number of iterations
min_iter :
Minimum number of iterations
epsilon_conv : float
Threshold for detecting convergence
model: string
The algorithm used to fit the model. Only "EM" is supported for
            the Expectation-Maximization algorithm
verbose : bool
Whether print out log info
"""
BD = DP - AD
        logLik = np.zeros(max_iter)
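        # Sketch of the per-iteration likelihood bookkeeping (commented out;
        # the einsum mixture is an assumption about the intended model,
        # AD ~ Binomial(DP, sum_d psi_d * GT_prob[:, d, :] @ theta)):
        # for it in range(max_iter):
        #     mu = np.einsum('d,vdg,g->v', self.psi, GT_prob, np.asarray(self.theta))
        #     logLik[it] = np.sum(AD * np.log(mu) + BD * np.log(1 - mu))
        #     ...  # EM updates of self.psi (and self.theta if learn_theta)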
from logging import getLogger
import types
import numpy as np
import scipy as sp
import scipy.stats
from statsmodels.sandbox.stats.multicomp import multipletests
from scipy.special import comb
logger = getLogger(__name__)
# data transformation
def rankdata(data):
logger.debug('ranking the data')
rdata = np.zeros(np.shape(data))
for crow in range(np.shape(data)[0]):
rdata[crow, :] = sp.stats.rankdata(data[crow, :])
return rdata
def log2data(data):
logger.debug('log2 transforming the data')
data[data < 2] = 2
data = np.log2(data)
return data
def binarydata(data):
logger.debug('binary transforming the data')
data[data != 0] = 1
return data
def normdata(data):
logger.debug('normalizing the data')
data = data / np.sum(data, axis=0)
return data
# different methods to calculate test statistic
def meandiff(data, labels):
mean0 = np.mean(data[:, labels == 0], axis=1)
mean1 = np.mean(data[:, labels == 1], axis=1)
tstat = mean1 - mean0
return tstat
def stdmeandiff(data, labels):
mean0 = np.mean(data[:, labels == 0], axis=1)
mean1 = np.mean(data[:, labels == 1], axis=1)
sd0 = np.std(data[:, labels == 0], axis=1, ddof=1)
sd1 = np.std(data[:, labels == 1], axis=1, ddof=1)
sdsum = sd0 + sd1
# if feature has identical values in all samples in each group, std is 0
# fix it to 1 so won't divide by 0 (mean/std is undefined)
sdsum[sdsum == 0] = 1
tstat = (mean1 - mean0) / sdsum
return tstat
def mannwhitney(data, labels):
group0 = data[:, labels == 0]
group1 = data[:, labels == 1]
tstat = np.array([scipy.stats.mannwhitneyu(group0[i, :], group1[i, :], alternative='two-sided')
.statistic for i in range(np.shape(data)[0])])
return tstat
# kruwallis give a column vector while others give row vector
def kruwallis(data, labels):
n = len(np.unique(labels))
allt = np.zeros(np.shape(data)[0])
for cbact in range(np.shape(data)[0]):
group = []
for j in range(n):
group.append(data[cbact, labels == j])
tstat = scipy.stats.kruskal(*group).statistic
allt[cbact] = tstat
return allt
def pearson(data, labels):
tstat = np.array([scipy.stats.pearsonr(data[i, :],
labels)[0] for i in range(np.shape(data)[0])])
return tstat
def spearman(data, labels):
tstat = np.array([scipy.stats.spearmanr(data[i, :],
labels).correlation for i in range(np.shape(data)[0])])
return tstat
# new fdr method
def dsfdr(data, labels, transform_type='rankdata', method='meandiff',
alpha=0.1, numperm=1000, fdr_method='dsfdr', random_seed=None):
'''
calculate the Discrete FDR for the data
Parameters
----------
data : N x S numpy array
each column is a sample (S total), each row a feature (N total)
labels : a 1d numpy array (length S)
the labels of each sample (same order as data) with the group
(0/1 if binary, 0-G-1 if G groups, or numeric values for correlation)
transform_type : str or None
        transformation to apply to the data before calculating
        the test statistic
        'rankdata' : rank transform each feature
'log2data' : calculate log2 for each feature using minimal cutoff of 2
'normdata' : normalize the data to constant sum per samples
'binarydata' : convert to binary absence/presence
None : no transformation to perform
method : str or function
the method to use for calculating test statistics:
'meandiff' : mean(A)-mean(B) (binary)
'mannwhitney' : mann-whitney u-test (binary)
'kruwallis' : kruskal-wallis test (multiple groups)
'stdmeandiff' : (mean(A)-mean(B))/(std(A)+std(B)) (binary)
'spearman' : spearman correlation (numeric)
'pearson' : pearson correlation (numeric)
'nonzerospearman' : spearman correlation only non-zero entries
(numeric)
'nonzeropearson' : pearson correlation only non-zero entries (numeric)
function : use this function to calculate the test statistic
(input is data,labels, output is array of float)
alpha : float
the desired FDR control level
numperm : int
number of permutations to perform
fdr_method : str
the FDR procedure to determine significant bacteria
'dsfdr' : discrete FDR method
'bhfdr' : Benjamini-Hochberg FDR method
'byfdr' : Benjamini-Yekutielli FDR method
'filterBH' : Benjamini-Hochberg FDR method with filtering
    random_seed : int, np.random.Generator instance or None, optional, default=None
set the random number generator seed for the random permutations
If int, random_seed is the seed used by the random number generator;
If Generator instance, random_seed is set to the random number generator;
If None, then fresh, unpredictable entropy will be pulled from the OS
Returns
-------
reject : np array of bool (length N)
True for features where the null hypothesis is rejected
tstat : np array of float (length N)
the test statistic value for each feature (for effect size)
pvals : np array of float (length N)
the p-value (uncorrected) for each feature
qvals: np array of float (length N)
the q-value (corrected p-value) for each feature.
'''
logger.debug('dsfdr using fdr method: %s' % fdr_method)
# create the numpy.random.Generator
rng = np.random.default_rng(random_seed)
data = data.copy()
if fdr_method == 'filterBH':
index = []
n0 = np.sum(labels == 0)
n1 = np.sum(labels == 1)
for i in range(np.shape(data)[0]):
nonzeros = np.count_nonzero(data[i, :])
if nonzeros < min(n0, n1):
pval_min = (comb(n0, nonzeros, exact=True)
+ comb(n1, nonzeros, exact=True)) / comb(n0 + n1, nonzeros)
if pval_min <= alpha:
index.append(i)
else:
index.append(i)
data = data[index, :]
# transform the data
if transform_type == 'rankdata':
data = rankdata(data)
elif transform_type == 'log2data':
data = log2data(data)
elif transform_type == 'binarydata':
data = binarydata(data)
elif transform_type == 'normdata':
data = normdata(data)
elif transform_type is None:
pass
else:
raise ValueError('transform type %s not supported' % transform_type)
numbact = np.shape(data)[0]
labels = labels.copy()
logger.debug('start permutation')
if method == 'meandiff':
# fast matrix multiplication based calculation
method = meandiff
tstat = method(data, labels)
t = np.abs(tstat)
numsamples = np.shape(data)[1]
p = np.zeros([numsamples, numperm])
k1 = 1 / np.sum(labels == 0)
k2 = 1 / np.sum(labels == 1)
for cperm in range(numperm):
rng.shuffle(labels)
p[labels == 0, cperm] = k1
p2 = np.ones(p.shape) * k2
p2[p > 0] = 0
mean1 = np.dot(data, p)
mean2 = np.dot(data, p2)
u = np.abs(mean1 - mean2)
elif method == 'mannwhitney' or method == \
'kruwallis' or method == 'stdmeandiff':
if method == 'mannwhitney':
method = mannwhitney
if method == 'kruwallis':
method = kruwallis
if method == 'stdmeandiff':
method = stdmeandiff
tstat = method(data, labels)
t = np.abs(tstat)
u = np.zeros([numbact, numperm])
for cperm in range(numperm):
rlabels = rng.permutation(labels)
rt = method(data, rlabels)
u[:, cperm] = rt
elif method == 'spearman' or method == 'pearson':
# fast matrix multiplication based correlation
if method == 'spearman':
data = rankdata(data)
labels = sp.stats.rankdata(labels)
meanval = np.mean(data, axis=1).reshape([data.shape[0], 1])
data = data - np.repeat(meanval, data.shape[1], axis=1)
labels = labels - np.mean(labels)
tstat = np.dot(data, labels)
t = np.abs(tstat)
# calculate the normalized test statistic
stdval = np.std(data, axis=1).reshape([data.shape[0], 1])
# to fix problem with 0 std divide by zero (since we permute it's ok)
        # note we don't remove from multiple hypotheses - could be done better
stdval[stdval == 0] = 1
tdata = data / np.repeat(stdval, data.shape[1], axis=1)
meanval = np.mean(tdata, axis=1).reshape([tdata.shape[0], 1])
tdata = tdata - np.repeat(meanval, tdata.shape[1], axis=1)
tlabels = labels / np.std(labels)
# fix for n since we multiply without normalizing for n
tlabels = tlabels / len(tlabels)
tlabels = tlabels - np.mean(tlabels)
tstat = np.dot(tdata, tlabels)
permlabels = np.zeros([len(labels), numperm])
for cperm in range(numperm):
rlabels = rng.permutation(labels)
permlabels[:, cperm] = rlabels
u = np.abs(np.dot(data, permlabels))
elif method == 'nonzerospearman' or method == 'nonzeropearson':
t = np.zeros([numbact])
tstat = np.zeros([numbact])
u = np.zeros([numbact, numperm])
for i in range(numbact):
index = np.nonzero(data[i, :])
label_nonzero = labels[index]
sample_nonzero = data[i, :][index]
if len(sample_nonzero) == 0:
continue
if method == 'nonzerospearman':
sample_nonzero = sp.stats.rankdata(sample_nonzero)
label_nonzero = sp.stats.rankdata(label_nonzero)
sample_nonzero = sample_nonzero - np.mean(sample_nonzero)
label_nonzero = label_nonzero - np.mean(label_nonzero)
tstat[i] = np.dot(sample_nonzero, label_nonzero)
t[i] = np.abs(tstat[i])
if np.std(sample_nonzero) == 0:
continue
tstat[i] = tstat[i] / (np.std(sample_nonzero) * np.std(label_nonzero) * len(sample_nonzero))
permlabels = np.zeros([len(label_nonzero), numperm])
for cperm in range(numperm):
rlabels = rng.permutation(label_nonzero)
permlabels[:, cperm] = rlabels
u[i, :] = np.abs(np.dot(sample_nonzero, permlabels))
elif isinstance(method, types.FunctionType):
# call the user-defined function of statistical test
t = method(data, labels)
tstat = t.copy()
# Get the abs() of the statistic since we are doing a double-sided test for dsFDR
t = np.abs(tstat)
u = np.zeros([numbact, numperm])
for cperm in range(numperm):
rlabels = rng.permutation(labels)
rt = method(data, rlabels)
u[:, cperm] = rt
u = np.abs(u)
else:
raise ValueError('unsupported method %s' % method)
# fix floating point errors (important for permutation values!)
# https://github.com/numpy/numpy/issues/8116
for crow in range(numbact):
closepos = np.isclose(t[crow], u[crow, :])
u[crow, closepos] = t[crow]
# calculate permutation p-vals
pvals = np.zeros([numbact]) # p-value for original test statistic t
qvals = np.ones([numbact]) # q-value (corrected p-value) for each feature.
pvals_u = np.zeros([numbact, numperm])
    # pseudo p-values for permuted test statistic u
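    # worked example: t[crow] = 5 with u[crow, :] = [3, 7, 5] gives
    # allstat = [5, 3, 7, 5], min-ranks [2, 1, 4, 2], and pseudo p-values
    # 1 - (rank - 1) / 4 = [0.75, 1.0, 0.25, 0.75], so pvals[crow] = 0.75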
for crow in range(numbact):
allstat = np.hstack([t[crow], u[crow, :]])
stat_rank = sp.stats.rankdata(allstat, method='min')
allstat = 1 - ((stat_rank - 1) / len(allstat))
        # convert ranks to p-values so that the largest statistic gets the smallest p
pvals[crow] = allstat[0]
pvals_u[crow, :] = allstat[1:]
# calculate FDR
if fdr_method == 'dsfdr':
# sort unique p-values for original test statistics biggest to smallest
pvals_unique = np.unique(pvals)
sortp = pvals_unique[np.argsort(-pvals_unique)]
# find a data-dependent threshold for the p-value
foundit = False
allfdr = []
allt = []
for cp in sortp:
realnum = np.sum(pvals <= cp)
fdr = (realnum + np.count_nonzero(
pvals_u <= cp)) / (realnum * (numperm + 1))
allfdr.append(fdr)
allt.append(cp)
if fdr <= alpha:
if not foundit:
realcp = cp
foundit = True
if not foundit:
# no good threshold was found
reject = np.repeat([False], numbact)
return reject, tstat, pvals, qvals
# fill the reject null hypothesis
        reject = (pvals <= realcp)
# fill the q-values
for idx, cfdr in enumerate(allfdr):
# fix for qval > 1 (since we count on all features in random permutation)
            cfdr = np.min([cfdr, 1])
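# A minimal usage sketch (hypothetical data; assumes the dsfdr() signature
# documented above, e.g. method='meandiff' and fdr_method='dsfdr'):
if __name__ == "__main__":
    demo_data = np.random.default_rng(0).poisson(5, size=(50, 20))  # features x samples
    demo_labels = np.array([0] * 10 + [1] * 10)  # group label per sample
    reject, tstat, pvals, qvals = dsfdr(demo_data, demo_labels, method='meandiff',
                                        alpha=0.1, numperm=1000, fdr_method='dsfdr')
    print('rejected %d / %d features' % (reject.sum(), len(reject)))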
import numpy as np
import tensorflow as tf
import gym
import time
from spinup.algos.ude_td3 import core
from spinup.algos.ude_td3.core import get_vars
from spinup.algos.ude_td3.investigate_uncertainty import DropoutUncertaintyModule,ObsSampleUncertaintyModule
from spinup.utils.logx import EpochLogger, Logger
class ReplayBuffer:
"""
A simple FIFO experience replay buffer for TD3 agents.
"""
def __init__(self, obs_dim, act_dim, size,
logger_fname='experiences_log.txt', **logger_kwargs):
# ExperienceLogger: save experiences for supervised learning
logger_kwargs['output_fname'] = logger_fname
self.experience_logger = Logger(**logger_kwargs)
self.obs1_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.obs2_buf = np.zeros([size, obs_dim], dtype=np.float32)
self.acts_buf = np.zeros([size, act_dim], dtype=np.float32)
self.rews_buf = np.zeros(size, dtype=np.float32)
self.done_buf = np.zeros(size, dtype=np.float32)
self.ptr, self.size, self.max_size = 0, 0, size
def store(self, obs, act, rew, next_obs, done,
uncertainty,
q1_pred, q2_pred, q1_post, q2_post,
rnd_e_act, rnd_e_cri,
step_index, steps_per_epoch, start_time):
# Save experiences in disk
self.log_experiences(obs, act, rew, next_obs, done,
uncertainty,
q1_pred, q2_pred, q1_post, q2_post,
rnd_e_act, rnd_e_cri,
step_index, steps_per_epoch, start_time)
# Save experiences in memory
self.obs1_buf[self.ptr] = obs
self.obs2_buf[self.ptr] = next_obs
self.acts_buf[self.ptr] = act
self.rews_buf[self.ptr] = rew
self.done_buf[self.ptr] = done
self.ptr = (self.ptr+1) % self.max_size
self.size = min(self.size+1, self.max_size)
def sample_batch(self, batch_size=32):
idxs = np.random.randint(0, self.size, size=batch_size)
return dict(obs1=self.obs1_buf[idxs],
obs2=self.obs2_buf[idxs],
acts=self.acts_buf[idxs],
rews=self.rews_buf[idxs],
done=self.done_buf[idxs])
def log_experiences(self, obs, act, rew, next_obs, done,
uncertainty,
q1_pred, q2_pred, q1_post, q2_post,
rnd_e_act, rnd_e_cri,
step_index, steps_per_epoch, start_time):
self.experience_logger.log_tabular('Epoch', step_index // steps_per_epoch)
self.experience_logger.log_tabular('Step', step_index)
# Log observation
for i, o_i in enumerate(obs):
self.experience_logger.log_tabular('o_{}'.format(i), o_i)
# Log action
for i, a_i in enumerate(act):
self.experience_logger.log_tabular('a_{}'.format(i), a_i)
# Log reward
self.experience_logger.log_tabular('r', rew)
# Log next observation
for i, o2_i in enumerate(next_obs):
self.experience_logger.log_tabular('o2_{}'.format(i), o2_i)
# Log uncertainty: flatten in row-major order
for i, unc_i in enumerate(np.array(uncertainty).flatten(order='C')):
self.experience_logger.log_tabular('unc_{}'.format(i), unc_i)
# Log q1_post, q2_post
self.experience_logger.log_tabular('q1_pred', q1_pred)
self.experience_logger.log_tabular('q2_pred', q2_pred)
# Log q1_post, q2_post
for i in range(len(q1_post)):
self.experience_logger.log_tabular('q1_post_{}'.format(i), q1_post[i])
self.experience_logger.log_tabular('q2_post_{}'.format(i), q2_post[i])
# Log RND actor prediction error
self.experience_logger.log_tabular('rnd_e_act', rnd_e_act)
# Log RND critic prediction error
self.experience_logger.log_tabular('rnd_e_cri', rnd_e_cri)
# Log done
self.experience_logger.log_tabular('d', done)
self.experience_logger.log_tabular('Time', time.time() - start_time)
self.experience_logger.dump_tabular(print_data=False)
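# A minimal usage sketch (hypothetical shapes/values; assumes the spinup Logger
# accepts an output_dir kwarg, and writes experience logs under /tmp):
if __name__ == "__main__":
    buf = ReplayBuffer(obs_dim=3, act_dim=1, size=1000,
                       logger_fname='demo_experiences.txt',
                       output_dir='/tmp/ude_td3_demo')
    o, a = np.zeros(3), np.zeros(1)
    buf.store(o, a, rew=0.0, next_obs=o, done=False,
              uncertainty=np.zeros((1, 1)), q1_pred=0.0, q2_pred=0.0,
              q1_post=np.zeros(2), q2_post=np.zeros(2),
              rnd_e_act=0.0, rnd_e_cri=0.0,
              step_index=0, steps_per_epoch=5000, start_time=time.time())
    batch = buf.sample_batch(batch_size=1)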
"""
UDE-TD3 (Uncertainty Driven Exploration Twin Delayed DDPG)
"""
def ude_td3(env_fn, render_env=False, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=5000, epochs=100, replay_size=int(1e6), gamma=0.99,
polyak=0.995, pi_lr=1e-3, q_lr=1e-3,
reward_scale=5,
without_start_steps=True, batch_size=100, start_steps=10000,
without_delay_train=False,
act_noise=0.1, target_noise=0.2, noise_clip=0.5, policy_delay=2,
max_ep_len=1000, logger_kwargs=dict(), save_freq=1,
n_post_action=10,
uncertainty_method='dropout',
sample_obs_std=1,
uncertainty_driven_exploration=False,
uncertainty_policy_delay=5000,
dropout_rate=0.1,
concentration_factor=0.1,
minimum_exploration_level=0,
):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``pi`` (batch, act_dim) | Deterministically computes actions
| from policy given states.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q1_pi`` (batch,) | Gives the composition of ``q1`` and
| ``pi`` for states in ``x_ph``:
| q1(x, pi(x)).
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to TD3.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
pi_lr (float): Learning rate for policy.
q_lr (float): Learning rate for Q-networks.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
act_noise (float): Stddev for Gaussian exploration noise added to
policy at training time. (At test time, no noise is added.)
target_noise (float): Stddev for smoothing noise added to target
policy.
noise_clip (float): Limit for absolute value of target policy
smoothing noise.
policy_delay (int): Policy will only be updated once every
policy_delay times for each update of the Q-networks.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
# TODO: Test no start steps
if without_start_steps:
start_steps = batch_size
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
tf.set_random_seed(seed)
np.random.seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, None, None)
print('Creating networks ...')
# Main outputs from computation graph
with tf.variable_scope('main'):
pi, _, pi_dropout_mask_generator, pi_dropout_mask_phs, \
q1, _, q1_dropout_mask_generator, q1_dropout_mask_phs, q1_pi, _, \
q2, _, q2_dropout_mask_generator, q2_dropout_mask_phs = actor_critic(x_ph, a_ph, **ac_kwargs, dropout_rate=0)
# Random Network Distillation
with tf.variable_scope('random_net_distill'):
# RND Target and Predictor Network
rnd_lr = 1e-3
rnd_targ_act, rnd_pred_act, rnd_targ_cri, rnd_pred_cri = core.random_net_distill(x_ph, a_ph, **ac_kwargs)
# TODO: add environment model learning transition dynamics
with tf.variable_scope('uncertainty'):
pi_unc, _, pi_dropout_mask_generator_unc, pi_dropout_mask_phs_unc, \
q1_unc, _, q1_dropout_mask_generator_unc, q1_dropout_mask_phs_unc, q1_pi_unc, _, \
q2_unc, _, q2_dropout_mask_generator_unc, q2_dropout_mask_phs_unc = actor_critic(x_ph, a_ph, **ac_kwargs, dropout_rate=dropout_rate)
# TODO: Calculate Uncertainty of Q-value function
# Initialize uncertainty module
obs_set_size = 10
track_obs_set_unc_frequency = 100 # every 100 steps
if uncertainty_method == 'dropout':
pi_unc_module = DropoutUncertaintyModule(act_dim, obs_dim, n_post_action,
obs_set_size, track_obs_set_unc_frequency,
x_ph, a_ph, pi_unc, q1_unc, q2_unc,
pi_dropout_mask_phs_unc, pi_dropout_mask_generator_unc,
q1_dropout_mask_phs_unc, q1_dropout_mask_generator_unc,
q2_dropout_mask_phs_unc, q2_dropout_mask_generator_unc,
rnd_targ_act, rnd_pred_act,
rnd_targ_cri, rnd_pred_cri,
logger_kwargs,
tf_var_scope_main='main',
tf_var_scope_target='target', tf_var_scope_unc='uncertainty')
elif uncertainty_method == 'gaussian_obs_sample':
pi_unc_module = ObsSampleUncertaintyModule(act_dim, obs_dim, n_post_action,
obs_set_size, track_obs_set_unc_frequency,
pi_unc, x_ph,
pi_dropout_mask_phs_unc, pi_dropout_mask_generator_unc,
logger_kwargs, sample_obs_std)
else:
raise ValueError('Please choose a proper uncertainty_method!')
# Target policy network
with tf.variable_scope('target'):
pi_targ, _, pi_dropout_mask_generator_targ, pi_dropout_mask_phs_targ, \
_, _, _, _, _, _, \
_, _, _, _ = actor_critic(x2_ph, a_ph, **ac_kwargs, dropout_rate=dropout_rate)
# Target Q networks
with tf.variable_scope('target', reuse=True):
        # TODO: add a without_policy_smoothing option
# Target policy smoothing, by adding clipped noise to target actions
epsilon = tf.random_normal(tf.shape(pi_targ), stddev=target_noise)
epsilon = tf.clip_by_value(epsilon, -noise_clip, noise_clip)
a2 = pi_targ + epsilon
a2 = tf.clip_by_value(a2, -act_limit, act_limit)
# Target Q-values, using action from target policy
_, _, _, _, \
q1_targ, _, q1_dropout_mask_generator_targ, q1_dropout_mask_phs_targ, _, _, \
q2_targ, _, q2_dropout_mask_generator_targ, q2_dropout_mask_phs_targ = actor_critic(x2_ph, a2, **ac_kwargs, dropout_rate=dropout_rate)
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size,
logger_fname='experiences_log.txt', **logger_kwargs)
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in ['main/pi', 'main/q1', 'main/q2', 'main'])
print('\nNumber of parameters: \t pi: %d, \t q1: %d, \t q2: %d, \t total: %d\n'%var_counts)
# TODO: use conservative estimation of Q
# Bellman backup for Q functions, using Clipped Double-Q targets
def post_sample_q1_and_q2(feed_dictionary, batch_size):
dropout_masks_set_q1 = {i: q1_dropout_mask_generator_targ.generate_dropout_mask() for i in
range(n_post_action)}
dropout_masks_set_q2 = {i: q2_dropout_mask_generator_targ.generate_dropout_mask() for i in
range(n_post_action)}
q1_targ_post = np.zeros((batch_size, n_post_action))
q2_targ_post = np.zeros((batch_size, n_post_action))
for post_i in range(n_post_action):
# Post sampled q
for mask_i in range(len(q1_dropout_mask_phs_targ)):
feed_dictionary[q1_dropout_mask_phs_targ[mask_i]] = dropout_masks_set_q1[post_i][mask_i]
feed_dictionary[q2_dropout_mask_phs_targ[mask_i]] = dropout_masks_set_q2[post_i][mask_i]
q1_targ_post[:, post_i] = sess.run(q1_targ, feed_dict=feed_dictionary)
q2_targ_post[:, post_i] = sess.run(q2_targ, feed_dict=feed_dictionary)
min_q_targ = np.minimum(q1_targ_post.mean(axis=1), q2_targ_post.mean(axis=1))
return min_q_targ
# min_q_targ = tf.placeholder(dtype=tf.float32)
# backup = tf.stop_gradient(r_ph + gamma*(1-d_ph)*min_q_targ)
min_q_targ = tf.minimum(q1_targ, q2_targ)
backup = tf.stop_gradient(r_ph + gamma*(1-d_ph)*min_q_targ)
# TD3 losses
pi_loss = -tf.reduce_mean(q1_pi)
q1_loss = tf.reduce_mean((q1-backup)**2)
q2_loss = tf.reduce_mean((q2-backup)**2)
q_loss = q1_loss + q2_loss
# Separate train ops for pi, q
pi_optimizer = tf.train.AdamOptimizer(learning_rate=pi_lr)
q_optimizer = tf.train.AdamOptimizer(learning_rate=q_lr)
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))
train_q_op = q_optimizer.minimize(q_loss, var_list=get_vars('main/q'))
# RND losses and train ops
rnd_loss_act = tf.reduce_mean((rnd_pred_act - rnd_targ_act)**2)
rnd_optimizer_act = tf.train.AdamOptimizer(learning_rate=rnd_lr)
train_rnd_op_act = rnd_optimizer_act.minimize(rnd_loss_act,
var_list=get_vars('random_net_distill/rnd_pred_act'))
rnd_loss_cri = tf.reduce_mean((rnd_pred_cri - rnd_targ_cri)**2)
rnd_optimizer_cri = tf.train.AdamOptimizer(learning_rate=rnd_lr)
train_rnd_op_cri = rnd_optimizer_cri.minimize(rnd_loss_cri,
var_list=get_vars('random_net_distill/rnd_pred_cri'))
# Polyak averaging for target variables
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
# Initializing targets to match main variables
target_init = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(target_init)
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph}, outputs={'pi': pi, 'q1': q1, 'q2': q2})
def get_action_train(o, noise_scale, pi_unc_module, step_index):
# RND actor
rnd_t_act, rnd_p_act, rnd_e_act = pi_unc_module.calculate_actor_RND_pred_error(o, sess)
# Generate action
feed_dictionary = {x_ph: o.reshape(1, -1)}
if uncertainty_driven_exploration:
# 1. Generate action Prediction, and q1 and q2 prediction
for mask_i in range(len(pi_dropout_mask_phs)):
feed_dictionary[pi_dropout_mask_phs[mask_i]] = np.ones(pi_dropout_mask_phs[mask_i].shape.as_list())
a_prediction = sess.run(pi, feed_dict=feed_dictionary)[0]
for mask_i in range(len(q1_dropout_mask_phs)):
feed_dictionary[q1_dropout_mask_phs[mask_i]] = np.ones(q1_dropout_mask_phs[mask_i].shape.as_list())
feed_dictionary[q2_dropout_mask_phs[mask_i]] = np.ones(q2_dropout_mask_phs[mask_i].shape.as_list())
feed_dictionary[a_ph] = a_prediction.reshape(1,-1)
q1_pred = sess.run(q1, feed_dict=feed_dictionary)[0]
q2_pred = sess.run(q2, feed_dict=feed_dictionary)[0]
            # 2. Generate post samples in a non-parallel way
            #    (Tried using ray to parallelize post sampling, but saw no speed-up on a single machine.)
            # TODO: generate a batch of dropout masks and multiply with weights to get a set
            #       of post-sampled actions in one sess.run() to speed up sampling.
a_post = pi_unc_module.get_post_samples(o, sess, step_index)
q1_post, q2_post = pi_unc_module.get_post_samples_q(o, a_prediction, sess, step_index)
# 3. Generate uncertainty-driven exploratory action
            a = np.zeros((act_dim,))
from __future__ import print_function, division
import os
import sys
import pytest
import warnings
import numpy
from galpy.util import galpyWarning
from test_actionAngle import reset_warning_registry
_TRAVIS= bool(os.getenv('TRAVIS'))
PY2= sys.version < '3'
# Print all galpyWarnings always for tests of warnings
warnings.simplefilter("always",galpyWarning)
#Basic sanity checking: circular orbit should have constant R, zero vR, vT=vc
def test_actionAngleTorus_basic():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential, rl, vcirc, \
FlattenedPowerPotential, PlummerPotential
tol= -4.
jr= 10.**-10.
jz= 10.**-10.
aAT= actionAngleTorus(pot=MWPotential)
# at R=1, Lz=1
jphi= 1.
angler= numpy.linspace(0.,2.*numpy.pi,101)
anglephi= numpy.linspace(0.,2.*numpy.pi,101)+1.
anglez= numpy.linspace(0.,2.*numpy.pi,101)+2.
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(MWPotential,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(MWPotential,rl(MWPotential,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
# at Lz=1.5, using Plummer
tol= -3.25
pp= PlummerPotential(normalize=1.)
aAT= actionAngleTorus(pot=pp)
jphi= 1.5
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(pp,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(pp,rl(pp,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
# at Lz=0.5, using FlattenedPowerPotential
tol= -4.
fp= FlattenedPowerPotential(normalize=1.)
aAT= actionAngleTorus(pot=fp)
jphi= 0.5
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
assert numpy.all(numpy.fabs(RvR[0]-rl(fp,jphi)) < 10.**tol), \
'circular orbit does not have constant radius for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[1]) < 10.**tol), \
'circular orbit does not have zero radial velocity for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[2]-vcirc(fp,rl(fp,jphi))) < 10.**tol), \
'circular orbit does not have constant vT=vc for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[3]) < 10.**tol), \
'circular orbit does not have zero vertical height for actionAngleTorus'
assert numpy.all(numpy.fabs(RvR[4]) < 10.**tol), \
'circular orbit does not have zero vertical velocity for actionAngleTorus'
return None
#Basic sanity checking: close-to-circular orbit should have freq. = epicycle freq.
def test_actionAngleTorus_basic_freqs():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import epifreq, omegac, verticalfreq, rl, \
JaffePotential, PowerSphericalPotential, HernquistPotential
tol= -3.
jr= 10.**-6.
jz= 10.**-6.
jp= JaffePotential(normalize=1.)
aAT= actionAngleTorus(pot=jp)
# at Lz=1
jphi= 1.
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(jp,rl(jp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(jp,rl(jp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(jp,rl(jp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
# at Lz=1.5, w/ different potential
pp= PowerSphericalPotential(normalize=1.)
aAT= actionAngleTorus(pot=pp)
jphi= 1.5
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(pp,rl(pp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(pp,rl(pp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(pp,rl(pp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
# at Lz=0.5, w/ different potential
tol= -2.5 # appears more difficult
hp= HernquistPotential(normalize=1.)
aAT= actionAngleTorus(pot=hp)
jphi= 0.5
om= aAT.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-epifreq(hp,rl(hp,jphi)))/om[0]) < 10.**tol, \
'Close-to-circular orbit does not have Or=kappa for actionAngleTorus'
assert numpy.fabs((om[1]-omegac(hp,rl(hp,jphi)))/om[1]) < 10.**tol, \
'Close-to-circular orbit does not have Ophi=omega for actionAngleTorus'
assert numpy.fabs((om[2]-verticalfreq(hp,rl(hp,jphi)))/om[2]) < 10.**tol, \
'Close-to-circular orbit does not have Oz=nu for actionAngleTorus'
return None
#Test that orbit from actionAngleTorus is the same as an integrated orbit
def test_actionAngleTorus_orbit():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import MWPotential2014
from galpy.orbit import Orbit
# Set up instance
aAT= actionAngleTorus(pot=MWPotential2014,tol=10.**-5.)
jr,jphi,jz= 0.05,1.1,0.025
# First calculate frequencies and the initial RvR
RvRom= aAT.xvFreqs(jr,jphi,jz,
numpy.array([0.]),
numpy.array([1.]),
numpy.array([2.]))
om= RvRom[1:]
# Angles along an orbit
ts= numpy.linspace(0.,100.,1001)
angler= ts*om[0]
anglephi= 1.+ts*om[1]
anglez= 2.+ts*om[2]
# Calculate the orbit using actionAngleTorus
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate the orbit using orbit integration
orb= Orbit([RvRom[0][0,0],RvRom[0][0,1],RvRom[0][0,2],
RvRom[0][0,3],RvRom[0][0,4],RvRom[0][0,5]])
orb.integrate(ts,MWPotential2014)
# Compare
tol= -3.
assert numpy.all(numpy.fabs(orb.R(ts)-RvR[0]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in R'
assert numpy.all(numpy.fabs(orb.vR(ts)-RvR[1]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vR'
assert numpy.all(numpy.fabs(orb.vT(ts)-RvR[2]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vT'
assert numpy.all(numpy.fabs(orb.z(ts)-RvR[3]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in z'
assert numpy.all(numpy.fabs(orb.vz(ts)-RvR[4]) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in vz'
assert numpy.all(numpy.fabs((orb.phi(ts)-RvR[5]+numpy.pi) % (2.*numpy.pi) -numpy.pi) < 10.**tol), \
'Integrated orbit does not agree with torus orbit in phi'
return None
# Test that actionAngleTorus w/ interp pot gives same freqs as regular pot
# Doesn't work well: TM aborts because our interpolated forces aren't
# consistent enough with the potential for TM's taste, but we test that it
# at least works somewhat
def test_actionAngleTorus_interppot_freqs():
from galpy.actionAngle import actionAngleTorus
from galpy.potential import LogarithmicHaloPotential, interpRZPotential
lp= LogarithmicHaloPotential(normalize=1.)
ip= interpRZPotential(RZPot=lp,
interpPot=True,
interpDens=True,interpRforce=True,interpzforce=True,
enable_c=True)
aAT= actionAngleTorus(pot=lp)
aATi= actionAngleTorus(pot=ip)
jr,jphi,jz= 0.05,1.1,0.02
om= aAT.Freqs(jr,jphi,jz)
omi= aATi.Freqs(jr,jphi,jz)
assert numpy.fabs((om[0]-omi[0])/om[0]) < 0.2, 'Radial frequency computed using the torus machine does not agree between potential and interpolated potential'
assert numpy.fabs((om[1]-omi[1])/om[1]) < 0.2, 'Azimuthal frequency computed using the torus machine does not agree between potential and interpolated potential'
assert numpy.fabs((om[2]-omi[2])/om[2]) < 0.8, 'Vertical frequency computed using the torus machine does not agree between potential and interpolated potential'
return None
#Test the actionAngleTorus against an isochrone potential: actions
def test_actionAngleTorus_Isochrone_actions():
from galpy.potential import IsochronePotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochrone
ip= IsochronePotential(normalize=1.,b=1.2)
aAI= actionAngleIsochrone(ip=ip)
tol= -6.
aAT= actionAngleTorus(pot=ip,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
angler= numpy.array([0.])
anglephi= numpy.array([numpy.pi])
anglez= numpy.array([numpy.pi/2.])
# Calculate position from aAT
RvR= aAT(jr,jphi,jz,angler,anglephi,anglez).T
# Calculate actions from aAI
ji= aAI(*RvR)
djr= numpy.fabs((ji[0]-jr)/jr)
dlz= numpy.fabs((ji[1]-jphi)/jphi)
djz= numpy.fabs((ji[2]-jz)/jz)
    assert djr < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jr at %f%%' % (djr*100.)
    assert dlz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Lz at %f%%' % (dlz*100.)
    assert djz < 10.**tol, 'actionAngleTorus and actionAngleIsochrone applied to isochrone potential disagree for Jz at %f%%' % (djz*100.)
return None
#Test the actionAngleTorus against an isochrone potential: frequencies and angles
def test_actionAngleTorus_Isochrone_freqsAngles():
from galpy.potential import IsochronePotential
from galpy.actionAngle import actionAngleTorus, \
actionAngleIsochrone
ip= IsochronePotential(normalize=1.,b=1.2)
aAI= actionAngleIsochrone(ip=ip)
tol= -6.
aAT= actionAngleTorus(pot=ip,tol=tol)
jr,jphi,jz= 0.075,1.1,0.05
    angler= numpy.array([0.1])+numpy.linspace(0.,numpy.pi,101)
"""Parse CaffeModel.
Helped by caffe2theano, MarcBS's Caffe2Keras module.
Author: <NAME>
Email : <EMAIL>
"""
from __future__ import print_function
from collections import OrderedDict
import numpy as np
from scipy.io import loadmat
from transcaffe import caffe_pb2, utils
from google.protobuf.text_format import Merge
from keras.models import Model
from transcaffe import layers as L
v1_map = {0: 'NONE', 1: 'ACCURACY', 2: 'BNLL', 3: 'CONCAT', 4: 'CONVOLUTION',
5: 'DATA', 6: 'DROPOUT', 7: 'EUCLIDEANLOSS', 8: 'FLATTEN',
9: 'HDF5DATA', 10: 'HDF5OUTPUT', 11: 'IM2COL', 12: 'IMAGEDATA',
13: 'INFOGAINLOSS', 14: 'INNERPRODUCT', 15: 'LRN',
16: 'MULTINOMIALLOGISTICLOSS', 17: 'POOLING', 18: 'RELU',
19: 'SIGMOID', 20: 'SOFTMAX', 21: 'SOFTMAXWITHLOSS', 22: 'SPLIT',
23: 'TANH', 24: 'WINDOWDATA', 25: 'ELTWISE', 26: 'POWER',
27: 'SIGMOIDCROSSENTROPYLOSS', 28: 'HINGELOSS', 29: 'MEMORYDATA',
30: 'ARGMAX', 31: 'THRESHOLD', 32: 'DUMMY_DATA', 33: 'SLICE',
34: 'MVN', 35: 'ABSVAL', 36: 'SILENCE', 37: 'CONTRASTIVELOSS',
38: 'EXP', 39: 'DECONVOLUTION'}
def load(model_def, model_bin, target_lib="keras"):
"""Load a Caffe model and convert to target library.
Parameters
----------
model_def : string
absolute path of a given .protobuf text
model_bin : string
absolute path of a given .caffemodel binary
target_lib : string
target library, currently only Keras is supported.
In planning: Lasagne, TensorFlow
Returns
-------
model : keras.models.model
a loaded model.
"""
print ("[MESSAGE] Target model is loading...")
net_param = parse_protobuf(model_def)
layers, version = get_layers(net_param)
input_dim = get_input_size(net_param)
model = get_model(layers, 1, tuple(input_dim[1:]), net_param.name)
print ("[MESSAGE] Printing converted model...")
model.summary()
print ("[MESSAGE] The model is built.")
print ("[MESSAGE] Parsing network parameters...")
param_layers, _ = parse_caffemodel(model_bin)
net_weights = get_network_weights(param_layers, version)
print ("[MESSAGE] Loading parameters into network...")
build_model(model, net_weights)
print ("[MESSAGE] The model is loaded successfully.")
return model
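# A minimal usage sketch (hypothetical file paths):
if __name__ == "__main__":
    demo_model = load("/path/to/deploy.prototxt", "/path/to/model.caffemodel")
    demo_model.save_weights("/tmp/converted_weights.h5")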
def parse_caffemodel(filename):
"""Parse a given caffemodel.
Parameters
----------
filename : string
absolute path of a given .caffemodel
Returns
-------
layers : list
The list representation of the network
version : string
        pretrained network version
"""
utils.file_checker(filename)
net_param = caffe_pb2.NetParameter()
f = open(filename, mode="rb")
contents = f.read()
f.close()
net_param.ParseFromString(contents)
return get_layers(net_param)
def parse_mean_file(filename, mode="proto"):
"""Parse a mean file by given path.
TODO: complete more options based on different Caffe Models
Parameters
----------
filename : string
absolute path of the mean file
mode : string
"proto" for .binaryproto file
"mat" for MAT binary file
Returns
-------
mean_mat : numpy.ndarray
an array that contains the mean values
"""
utils.file_checker(filename)
if mode == "proto":
tp = caffe_pb2.TransformationParameter()
f = open(filename, mode="rb")
mean_contents = f.read()
f.close()
tp.ParseFromString(mean_contents)
mean_mat = np.array(tp.mean_value).reshape((3,
tp.crop_size,
tp.crop_size))
mean_mat = np.transpose(mean_mat, (1, 2, 0))
elif mode == "mat":
# based on VGG's Mat file.
mean_contents = loadmat(filename)
mean_mat = mean_contents["image_mean"]
print(mean_mat.shape)
return mean_mat
def parse_protobuf(filename):
"""Parse a given protobuf file.
Parameters
----------
filename : string
absolute path of .prototxt file
Returns
-------
net_param : caffe_pb2.NetParameter
The parsed .prototxt structure.
"""
utils.file_checker(filename)
f = open(filename, mode="rb")
net_param = caffe_pb2.NetParameter()
net_def = f.read()
# append quotes around type information if needed.
    # it seems not to work, possibly because of a newer definition?
# net_def = f.read().split("\n")
# for i, line in enumerate(net_def):
# l = line.strip().replace(" ", "").split('#')[0]
# if len(l) > 6 and l[:5] == 'type:' and l[5] != "\'" and l[5] != '\"':
# type_ = l[5:]
# net_def[i] = ' type: "' + type_ + '"'
#
# net_def = '\n'.join(net_def)
# Check before Merge? For V1?
Merge(net_def, net_param)
f.close()
return net_param
def get_layers(net_param):
"""Get layers information.
Parameters
----------
net_param : caffe_pb2.NetParameter
        A pretrained network description.
Returns
-------
layers : list
description of the layers.
version : string
version information of the pretrained model.
"""
if len(net_param.layers) > 0:
return net_param.layers[:], "V1"
elif len(net_param.layer) > 0:
return net_param.layer[:], "V2"
else:
raise Exception("Couldn't find layers!")
def get_layer_type(layer):
"""Get a given layer type.
Parameters
----------
layer : caffe_pb2.V1LayerParameter
a given layer in the network
Returns
-------
type : int or string
type of the layer.
"""
if type(layer.type) == int:
return str(v1_map[layer.type]).lower()
else:
return str(layer.type).lower()
def get_input_size(net_param):
"""Get input parameters, or guess one at least.
Parameters
----------
net_param : caffe_pb2.NetParameter
structure that contains all the network parameters
Returns
-------
in_size : tuple
tuple that defines the input size
"""
if len(net_param.input_dim) != 0:
return net_param.input_dim
elif len(net_param.input_shape) != 0:
return net_param.input_shape
else:
print("[MESSAGE] Couldn't find Input shape in the Network Parameters."
"The returned shape is inferenced from the network name")
# try:
# scale = layer.transform_param.scale
# scale = 1 if scale <= 0 else scale
# except AttributeError:
# pass
return []
def check_phase(layer, phase):
"""Check if the layer matches with the target phase.
Parameters
----------
layer : caffe_pb2.V1LayerParameter
A given layer.
phase : int
0 : train
1 : test
"""
try:
return True if layer.include[0].phase == phase else False
except IndexError:
return True
def get_network(layers, phase):
"""Get structure of the network.
Parameters
----------
layers : list
list of layers parsed from network parameters
phase : int
0 : train
1 : test
"""
num_layers = len(layers)
network = OrderedDict()
for i in xrange(num_layers):
layer = layers[i]
if check_phase(layer, phase):
layer_id = "trans_layer_"+str(i)
if layer_id not in network:
network[layer_id] = []
prev_blobs = map(str, layer.bottom)
next_blobs = map(str, layer.top)
for blob in prev_blobs+next_blobs:
if blob not in network:
network[blob] = []
for blob in prev_blobs:
network[blob].append(layer_id)
network[layer_id].extend(next_blobs)
network = remove_loops(network)
network = remove_blobs(network)
return network
def remove_loops(network):
"""Remove potential loops from the network.
Parameters
----------
network : OrderedDict
given network dictionary
new_network : OrderedDict
a loops free altered network.
"""
for e in network:
if e.startswith("trans_layer_"):
continue
idx = 0
while idx < len(network[e]):
next_e = network[e][idx]
if e in network[next_e]:
new_e = e+"_"+str(idx)
network[e].remove(next_e)
network[new_e] = network[e]
network[e] = [next_e]
network[next_e] = [new_e]
for n in network[new_e]:
if network[n] == [e]:
network[n] = [new_e]
e = new_e
idx = 0
else:
idx += 1
return network
def remove_blobs(network):
"""Remove blobs from network.
Parameters
----------
network : OrderedDict
given network dictionary
Returns
-------
new_network : OrderedDict
blobs removed network dictionary
"""
new_network = OrderedDict()
def get_idx(x): return int(x[12:])
for e in network:
if e.startswith("trans_layer_"):
idx = get_idx(e)
if idx not in new_network:
new_network[idx] = []
for next_e in network[e]:
next_es = map(get_idx, network[next_e])
new_network[idx].extend(next_es)
return new_network
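# Example: {'trans_layer_0': ['blob'], 'blob': ['trans_layer_1'],
#           'trans_layer_1': []} maps to {0: [1], 1: []}.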
def reverse_net(network):
"""Reverse a network.
Parameters
----------
network : OrderedDict
A parsed network
Returns
-------
rev : OrderedDict
reversed network
"""
rev = OrderedDict()
for node in network.keys():
rev[node] = []
for node in network.keys():
for n in network[node]:
rev[n].append(node)
return rev
def get_input_layers(network):
"""Get input layers (layers with zero in-order).
Parameters
----------
network : OrderedDict
A parsed network
Returns
-------
in_layers : list
a list of input layers
"""
return get_output_layers(reverse_net(network))
def get_output_layers(network):
"""Get output layers (layers with zero out-order).
Parameters
----------
network : OrderedDict
A parsed network
Returns
-------
out_layers : list
a list of out layers
"""
out_layers = []
for idx in network:
if network[idx] == []:
out_layers.append(idx)
return out_layers
def get_model(layers, phase, input_dim, model_name, lib_type="keras"):
"""Get a model by given network parameters.
Parameters
----------
layers : list
network structure by given parsed network.
phase : int
0 : train
1 : test
input_dim : list
the input dimension
model_name : string
the name of the given model.
lib_type : string
currently only Keras is supported.
"""
network = get_network(layers, phase)
if len(network) == 0:
raise Exception("No valid network is parsed!")
in_layers = get_input_layers(network)
out_layers = get_output_layers(network)
rev_network = reverse_net(network)
    def data_layer(x): return get_layer_type(x) in ['data', 'imagedata', 'memorydata',
                                                    'hdf5data', 'windowdata']
# remove the link from input to output.
for in_idx in in_layers:
for out_idx in out_layers:
if out_idx in network[in_idx] and data_layer(layers[in_idx]):
                network[in_idx].remove(out_idx)
net = [None]*(max(network)+1)
for layer_id in network:
layer = layers[layer_id]
layer_name = layer.name
layer_type = get_layer_type(layer)
if layer_id in in_layers:
net[layer_id] = L.input_layer(input_dim, layer_name)
else:
layer_in = [None]*(len(rev_network[layer_id]))
for l in xrange(len(rev_network[layer_id])):
layer_in[l] = net[rev_network[layer_id][l]]
if layer_type in ["relu", "sigmoid", "softmax", "softmaxwithloss",
"split", "tanh"]:
net[layer_id] = L.activation(act_type=layer_type,
name=layer_name)(layer_in)
elif layer_type == "batchnorm":
epsilon = layer.batchnorm_param.eps
axis = layer.scale_param.axis
net[layer_id] = L.batch_norm(epsilon=epsilon, axis=axis,
name=layer_name)(layer_in)
elif layer_type == "lrn":
alpha = layer.lrn_param.alpha
k = layer.lrn_param.k
beta = layer.lrn_param.beta
n = layer.lrn_param.local_size
net[layer_id] = L.lrn(alpha, k, beta, n, layer_name)(layer_in)
elif layer_type == "scale":
axis = layer.scale_param.axis
net[layer_id] = L.scale(axis, layer_name)(layer_in)
elif layer_type == "dropout":
prob = layer.dropout_param.dropout_ratio
net[layer_id] = L.dropout(prob, name=layer_name)(layer_in)
elif layer_type == "flatten":
net[layer_id] = L.flatten(name=layer_name)(layer_in)
elif layer_type == "concat":
axis = layer.concat_param.axis
net[layer_id] = L.merge(layer_in, mode='concat',
concat_axis=1, name=layer_name)
elif layer_type == "eltwise":
axis = layer.scale_param.axis
op = layer.eltwise_param.operation
if op == 0:
mode = "mul"
elif op == 1:
mode = "sum"
elif op == 2:
mode == "max"
else:
raise NotImplementedError("Operation is not implemented!")
net[layer_id] = L.merge(layer_in, mode=mode, concat_axis=axis,
name=layer_name)
elif layer_type == "innerproduct":
output_dim = layer.inner_product_param.num_output
if len(layer_in[0]._keras_shape[1:]) > 1:
layer_in = L.flatten(name=layer_name+"_flatten")(layer_in)
net[layer_id] = L.dense(output_dim, name=layer_name)(layer_in)
elif layer_type == "convolution":
has_bias = layer.convolution_param.bias_term
nb_filter = layer.convolution_param.num_output
nb_col = (layer.convolution_param.kernel_size or
[layer.convolution_param.kernel_h])[0]
nb_row = (layer.convolution_param.kernel_size or
[layer.convolution_param.kernel_w])[0]
stride_h = (layer.convolution_param.stride or
[layer.convolution_param.stride_h])[0] or 1
stride_w = (layer.convolution_param.stride or
[layer.convolution_param.stride_w])[0] or 1
pad_h = (layer.convolution_param.pad or
[layer.convolution_param.pad_h])[0]
pad_w = (layer.convolution_param.pad or
[layer.convolution_param.pad_w])[0]
if pad_h + pad_w > 0:
layer_in = L.zeropadding(padding=(pad_h, pad_w),
name=layer_name)(layer_in)
net[layer_id] = L.convolution(nb_filter, nb_row, nb_col,
bias=has_bias,
subsample=(stride_h, stride_w),
name=layer_name)(layer_in)
elif layer_type == "pooling":
kernel_h = layer.pooling_param.kernel_size or \
layer.pooling_param.kernel_h
kernel_w = layer.pooling_param.kernel_size or \
layer.pooling_param.kernel_w
stride_h = layer.pooling_param.stride or \
layer.pooling_param.stride_h or 1
stride_w = layer.pooling_param.stride or \
layer.pooling_param.stride_w or 1
pad_h = layer.pooling_param.pad or layer.pooling_param.pad_h
pad_w = layer.pooling_param.pad or layer.pooling_param.pad_w
if pad_h + pad_w > 0:
layer_in = L.zeropadding(padding=(pad_h, pad_w),
name=layer_name)(layer_in)
net[layer_id] = L.pooling(pool_size=(kernel_h, kernel_w),
strides=(stride_h, stride_w),
pool_type=layer.pooling_param.pool,
name=layer_name)(layer_in)
in_l = [None]*(len(in_layers))
out_l = [None]*(len(out_layers))
for i in xrange(len(in_layers)):
in_l[i] = net[in_layers[i]]
for i in xrange(len(out_layers)):
out_l[i] = net[out_layers[i]]
return Model(input=in_l, output=out_l, name=model_name)
def get_network_weights(layers, version):
"""Parse network weights.
Parameters
----------
layers : list
List of parameter layers from caffemodel
version : "string"
"V1" or "V2"
Return
------
net_weights : OrderedDict
network's weights
"""
net_weights = OrderedDict()
for layer in layers:
layer_type = get_layer_type(layer)
if layer_type == "innerproduct":
blobs = layer.blobs
if (version == "V1"):
num_filters = blobs[0].num
num_channels = blobs[0].channels
num_col = blobs[0].height
num_row = blobs[0].width
elif (version == "V2"):
if (len(blobs[0].shape.dim) == 4):
num_filters = int(blobs[0].shape.dim[0])
num_channels = int(blobs[0].shape.dim[1])
num_col = int(blobs[0].shape.dim[2])
num_row = int(blobs[0].shape.dim[3])
else:
num_filters = 1
num_channels = 1
num_col = int(blobs[0].shape.dim[0])
num_row = int(blobs[0].shape.dim[1])
else:
raise Exception("Can't recognize the version %s" % (version))
W = np.array(blobs[0].data).reshape(num_filters, num_channels,
num_col, num_row)[0, 0, :, :]
W = W.T
b = np.array(blobs[1].data)
layer_weights = [W.astype(dtype=np.float32),
b.astype(dtype=np.float32)]
net_weights[layer.name] = layer_weights
elif layer_type == "convolution":
blobs = layer.blobs
if (version == "V1"):
num_filters = blobs[0].num
num_channels = blobs[0].channels
num_col = blobs[0].height
num_row = blobs[0].width
elif (version == "V2"):
num_filters = int(blobs[0].shape.dim[0])
num_channels = int(blobs[0].shape.dim[1])
num_col = int(blobs[0].shape.dim[2])
num_row = int(blobs[0].shape.dim[3])
else:
raise Exception("Can't recognize the version %s" % (version))
num_group = layer.convolution_param.group
num_channels *= num_group
W = np.zeros((num_filters, num_channels, num_col, num_row))
if layer.convolution_param.bias_term:
b = np.array(blobs[1].data)
else:
b = None
group_ds = len(blobs[0].data) // num_group
ncs_group = num_channels // num_group
nfs_group = num_filters // num_group
for i in range(num_group):
group_weights = W[i*nfs_group: (i+1)*nfs_group,
i*ncs_group: (i+1)*ncs_group, :, :]
group_weights[:] = np.array(
blobs[0].data[i*group_ds:
(i+1)*group_ds]).reshape(group_weights.shape)
for i in range(W.shape[0]):
for j in range(W.shape[1]):
W[i, j] = np.rot90(W[i, j], 2)
if b is not None:
layer_weights = [W.astype(dtype=np.float32),
b.astype(dtype=np.float32)]
else:
layer_weights = [W.astype(dtype=np.float32)]
net_weights[layer.name] = layer_weights
elif layer_type == "batchnorm":
blobs = layer.blobs
if (version == "V2"):
num_kernels = int(blobs[0].shape.dim[0])
else:
raise NotImplementedError("Batchnorm is not "
"implemented in %s" % (version))
W_mean = np.array(blobs[0].data)
W_std = np.array(blobs[1].data)
            # assumption: Keras BatchNormalization weight order [gamma, beta, mean, std],
            # with gamma=1 and beta=0 (a separate 'scale' layer carries learned gamma/beta)
            net_weights[layer.name] = [np.ones(num_kernels),
                                       np.zeros(num_kernels),
                                       W_mean, W_std]
""" Generates percepts """
import numpy as np
import random
import h5py
import os
import json
from datetime import datetime
import argparse
import pulse2percept as p2p
def rand_stim(implant, n_electrodes=1):
maxamp = 10
maxfreq = 200
# randomly pick UP TO n_electrodes
sample_elecs = random.randint(1, n_electrodes)
elecs = random.sample([i for i in range(len(implant.electrodes))], sample_elecs)
stim = np.zeros((len(implant.electrodes), 3), dtype='float32')
for elec in elecs:
amp = random.random() * (maxamp - 1) + 1
freq = random.random() * (maxfreq - 1) + 1
pdur = random.expovariate(1)
while pdur > 1000 / freq / 2 or pdur < 0.01 or pdur > 100:
pdur = random.expovariate(1)
stim[elec] = np.array([freq, amp, pdur])
return stim
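# A minimal usage sketch (assumes pulse2percept ships the ArgusII implant):
if __name__ == "__main__":
    demo_implant = p2p.implants.ArgusII()
    demo_stim = rand_stim(demo_implant, n_electrodes=3)
    print(demo_stim.shape)  # (n_total_electrodes, 3): freq, amp, pulse duration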
def rand_percepts(model, implant, n_elecs=1, n_samples=10000):
model.build()
    x = np.array([implant[e].x for e in implant.electrodes], dtype='float32')
import math
from functools import lru_cache
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy.optimize import leastsq
from astropy.stats import sigma_clipped_stats
from photutils import DAOStarFinder
from astropy.visualization import SqrtStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from photutils import CircularAperture
from scipy import stats
from visnav.algo import tools
from visnav.algo.image import ImageProc
from visnav.algo.model import Camera
from visnav.calibration.base import Measure, Frame, merge, RAW_IMG_MAX_VALUE
from visnav.calibration.spectrum import get_star_spectrum, sensed_electron_flux_star_spectrum
from visnav.render.stars import Stars
from visnav.render.sun import Sun
DEBUG_EXTRACTION = 0
DEBUG_CHANNELS = 0
DEBUG_MATCHING = 0 # show 1=tycho, 2=t_eff, 3=mag_v
MANUAL_ATTITUDE = 0
SHOW_MEASURES = 0
STAR_SPECTRA_PATH = r'C:\projects\s100imgs\spectra'
class StarMeasure(Measure):
def __init__(self, frame, cam_i, obj_id, du_count, t_eff, fe_h, log_g, mag_v, ixy, weight=1):
super(StarMeasure, self).__init__(frame, cam_i, obj_id, du_count, weight=weight)
self.t_eff = t_eff
self.fe_h = fe_h
self.log_g = log_g
self.mag_v = mag_v
self.ixy = ixy
simbad = Stars.get_property_by_id(self.obj_id[0], 'simbad')
self.bayer = simbad.strip(' *').lower().replace(' ', '_').replace('alf_', 'alp_')
self.c_unsat_du = None
self.c_px_du_sat = None
def expected_du(self, pre_sat_gain=1, post_sat_gain=1, qeff_coefs=None, psf_coef=(1, 1, 1)):
cam = self.frame.cam[self.cam_i]
cgain = cam.gain * cam.aperture_area * cam.emp_coef
fgain = self.frame.gain * self.frame.exposure
queff_coefs = tuple(cam.qeff_coefs if qeff_coefs is None else qeff_coefs[self.cam_i])
if 0:
p_elec, _ = Camera.electron_flux_in_sensed_spectrum(queff_coefs, self.t_eff, self.fe_h, self.log_g,
self.mag_v, cam.lambda_min, cam.lambda_max)
if 1:
gomos_mag_v = self.mag_v # if self.bayer == 'alp_ori' else None
electrons = sensed_electron_flux_star_spectrum(STAR_SPECTRA_PATH, self.bayer, self.mag_v, self.t_eff,
self.log_g, self.fe_h, cam.lambda_min, cam.lambda_max,
queff_coefs, gomos_mag_v)
if 0: #self.bayer == 'alp_ori':
spectrum_fn0 = Stars.synthetic_radiation_fn(self.t_eff, self.fe_h, self.log_g, mag_v=self.mag_v)
spectrum_fn0b = Stars.synthetic_radiation_fn(self.t_eff, self.fe_h, self.log_g, mag_v=self.mag_v,
model='ck04models',
lam_min=cam.lambda_min - 10e-9, lam_max=cam.lambda_max + 10e-9)
spectrum_fn1 = get_star_spectrum(STAR_SPECTRA_PATH, self.bayer, self.mag_v, self.t_eff, self.log_g, self.fe_h,
cam.lambda_min, cam.lambda_max, gomos_mag_v)
lams = np.linspace(cam.lambda_min, cam.lambda_max, 3000)
plt.plot(lams, spectrum_fn0(lams))
plt.plot(lams, spectrum_fn0b(lams))
plt.plot(lams, spectrum_fn1(lams))
plt.title(self.bayer)
plt.show()
du = pre_sat_gain * RAW_IMG_MAX_VALUE * fgain * cgain * electrons
self.c_unsat_du = du
if StarFrame.STAR_SATURATION_MODELING == StarFrame.STAR_SATURATION_MODEL_MOTION:
psf_coef = tuple(psf_coef) if StarFrame.STAR_SATURATION_MULTI_KERNEL else \
((psf_coef[self.cam_i],) if len(psf_coef) == 3 else tuple(psf_coef))
du, self.c_px_du_sat = self._motion_kernel_psf_saturation(du, psf_coef, True)
elif StarFrame.STAR_SATURATION_MODELING == StarFrame.STAR_SATURATION_MODEL_ANALYTICAL:
du = self._analytical_psf_saturation(du, psf_coef[self.cam_i])
else:
assert StarFrame.STAR_SATURATION_MODELING == StarFrame.STAR_SATURATION_MODEL_IDEAL
# do nothing
du *= post_sat_gain
self.c_expected_du = du
return du
def _analytical_psf_saturation(self, du, psf_sd):
psf_coef = psf_sd**2 * 2 * np.pi
center_px_val = du / psf_coef
if center_px_val < self.frame.max_signal:
sat_du = psf_coef * self.frame.max_signal * (1 + np.log(center_px_val / self.frame.max_signal))
else:
sat_du = du
return sat_du
def _motion_kernel_psf_saturation(self, du, psf_sd, get_px_du_sat=False):
read_sd = None
if len(psf_sd) in (2, 4):
psf_sd, read_sd = psf_sd[:-1], psf_sd[-1]
line_xy = self.frame.motion_in_px(self.ixy)
mb_psf = self._get_motion_kernel(psf_sd, line_xy)
px_du_sat = np.clip(mb_psf * du, 0, self.frame.max_signal)
if read_sd:
noise = trunc_gaussian_shift(px_du_sat, read_sd * self.frame.max_signal, self.frame.max_signal)
if 1:
px_du_sat = np.clip(px_du_sat - noise, 0, self.frame.max_signal)
du_sat = np.sum(px_du_sat)
else:
                noise = np.random.normal(0, read_sd * self.frame.max_signal, px_du_sat.shape)
                noise = cv2.filter2D(noise, cv2.CV_64F, ImageProc.gkern2d(5, 1.0))
                px_du_sat = np.clip(px_du_sat + noise, 0, self.frame.max_signal)
else:
du_sat = np.sum(px_du_sat)
return (du_sat,) + ((px_du_sat,) if get_px_du_sat else tuple())
@staticmethod
@lru_cache(maxsize=20)
def _get_motion_kernel(psf_sd, line_xy):
if len(psf_sd) == 3:
sd1, w, sd2 = psf_sd
else:
sd1, w, sd2 = psf_sd[0], 0, 0
psf_hw = math.ceil(max(sd1 * 3, sd2 * 2))
psf_fw = 1 + 2 * psf_hw
psf = ImageProc.gkern2d(psf_fw, sd1) + (0 if w == 0 else w * ImageProc.gkern2d(psf_fw, sd2))
line_xy = np.array(line_xy)
        line = np.zeros(np.ceil(np.abs(np.flip(line_xy))).astype(int) + psf_fw)
cnt = np.flip(line.shape)/2
        start = tuple(np.round(cnt - line_xy/2).astype(int))
        end = tuple(np.round(cnt + line_xy/2).astype(int))
cv2.line(line, start, end, color=1.0, thickness=1, lineType=cv2.LINE_AA)
mb_psf = cv2.filter2D(line, cv2.CV_64F, psf)
mb_psf /= np.sum(mb_psf) # normalize to one
return mb_psf
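# A minimal sketch (hypothetical inputs) of the cached motion-blur PSF above:
# a 1-px line with the given x/y extent, convolved with a Gaussian kernel.
if __name__ == "__main__":
    demo_psf = StarMeasure._get_motion_kernel(psf_sd=(1.5,), line_xy=(6.0, 2.0))
    print(demo_psf.shape, demo_psf.sum())  # kernel is normalized to sum to 1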
def trunc_gaussian_shift(mean, sd, upper_limit):
# from https://en.wikipedia.org/wiki/Truncated_normal_distribution
beta = (upper_limit - mean) / sd
shift = sd * stats.norm.pdf(beta) / stats.norm.cdf(beta)
return shift
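# Worked example: with mean=0, sd=1, upper_limit=0 the truncated mean is
# 0 - 1 * pdf(0) / cdf(0) ~= -0.7979, i.e. trunc_gaussian_shift(0, 1, 0) ~= 0.7979.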
class StarFrame(Frame):
(
STAR_SATURATION_MODEL_IDEAL,
STAR_SATURATION_MODEL_ANALYTICAL,
STAR_SATURATION_MODEL_MOTION,
) = range(3)
STAR_SATURATION_MODELING = STAR_SATURATION_MODEL_MOTION
STAR_SATURATION_MULTI_KERNEL = False
def __init__(self, *args, q=None, override_star_data=None, **kwargs):
super(StarFrame, self).__init__(*args, **kwargs)
def detect(imgc):
_, imgc = cv2.threshold(imgc, 560, 255, type=cv2.THRESH_BINARY)
imgc = cv2.dilate(imgc, np.ones((3, 3)))
imgc = cv2.erode(imgc, np.ones((3, 3)), iterations=2)
imgc = cv2.dilate(imgc, np.ones((3, 3)))
return imgc
b_mask = detect(self.image[:, :, 0])
g_mask = detect(self.image[:, :, 1])
r_mask = detect(self.image[:, :, 2])
b_mean = np.mean(self.image[:, :, 0][b_mask > 0])
g_mean = np.mean(self.image[:, :, 1][g_mask > 0])
r_mean = np.mean(self.image[:, :, 2][r_mask > 0])
bg_mean = np.median(self.image)
bn, gn, rn = np.sum(b_mask > 0), np.sum(g_mask > 0), np.sum(r_mask > 0)
sat_mean = (bn * b_mean + gn * g_mean + rn * r_mean) / (bn + gn + rn)
self.max_signal = sat_mean - bg_mean
self.override_star_data = override_star_data or {}
self.stars = []
self.q = q
self.mb_cnt_ixy = None
self.mb_angle = None
@classmethod
def process_metadata(cls, frame, meta):
if meta.get('dec', False):
frame.set_orientation(dec_ra_pa=(meta.get('dec'), meta.get('ra'), meta.get('pa', 0)))
if meta.get('mb_cnt_ixy', False) is not False:
frame.mb_cnt_ixy = meta.get('mb_cnt_ixy')
frame.mb_angle = math.radians(meta.get('mb_angle'))
def motion_in_px(self, ixy):
r = np.linalg.norm(np.array(ixy) - self.mb_cnt_ixy)
x, y = np.array(ixy) - self.mb_cnt_ixy
line_dir = np.arctan2(-y, x) - np.pi/2
# (2 * np.pi * r) * (self.mb_angle / 2 / np.pi) -- full circle perimeter * ratio of the whole circle
line_len = r * self.mb_angle
x, y = line_len * np.cos(line_dir), -line_len * np.sin(line_dir)
return x, y
def set_orientation(self, q=None, angleaxis=None, dec_ra_pa=None):
if q is not None:
self.q = q
elif angleaxis is not None:
self.q = tools.angleaxis_to_q(angleaxis)
else:
assert dec_ra_pa is not None, 'no orientation given'
dec, ra, pa = map(math.radians, dec_ra_pa)
self.q = tools.ypr_to_q(dec, ra, pa)
def detect_stars(self, thumbnail=True):
stars_detected = self._extract_stars()
if self.q is None:
self.determine_orientation(stars_detected)
self.measures, self.star_measures = self.finetune_orientation(stars_detected, thumbnail=thumbnail)
return self.measures, self.star_measures
def determine_orientation(self, stars):
assert False, 'not implemented' # use deep learning? or custom features and a bag of words type thing?
def finetune_orientation(self, stars, iterations=100, thumbnail=True):
""" match stars based on proximity, fine tune orientation, create measure objects """
MIN_MATCHES = 3
# use ICP-like algorithm
matches = []
for i in range(iterations):
matches, cols = self._match_stars(stars, max_dist=max(0.02, 0.05-i*0.01), mag_cutoff=3.0 if thumbnail else 3.0)
if np.sum([j is not None for j in matches]) < MIN_MATCHES:
break
if self._update_ori(matches, cols, stars):
break # if update small enough, stop iterating
matches, cols = self._match_stars(stars, max_dist=0.02, mag_cutoff=3.0 if thumbnail else 3.0, plot=SHOW_MEASURES)
def ifna(v, d):
return d if v is None or np.isnan(v) else v
measures = []
star_meas = {}
mag_adj = np.median([stars[i]['mag'] - m[cols['mag_v']] for i, m in enumerate(matches) if m is not None])
for i, m in enumerate(matches):
if m is not None:
cid = '&'.join([Stars.get_catalog_id(id) for id in m[cols['id']]])
for band, j in enumerate(('b', 'g', 'r') if len(self.cam) == 3 else ('v',)):
t_eff = float(ifna(m[cols['t_eff']], -1))
fe_h = float(ifna(m[cols['fe_h']], Sun.METALLICITY))
log_g = float(ifna(m[cols['log_g']], Sun.LOG_SURFACE_G))
t_est = 0
if t_eff < 0:
t_est = 1
mag_v, mag_b = m[cols['mag_v']], m[cols['mag_b']]
if mag_b is None or np.isnan(mag_b):
print('Both t_eff AND mag_b missing! ID=%s' % (m[cols['id']],))
mag_b = mag_v
t_eff = Stars.effective_temp(mag_b - mag_v, fe_h, log_g)
print('star %s, missing t_eff, estimated as %.1f' % (cid, t_eff))
measures.append(StarMeasure(self, band, m[cols['id']], stars[i]['du_' + j],
t_eff, fe_h, log_g, m[cols['mag_v']], (stars[i]['x'], stars[i]['y'])))
merge(star_meas, {m[cols['id']]: [{'meas': (stars[i]['du_b'], stars[i]['du_g'], stars[i]['du_r']),
'm_mag_v': stars[i]['mag'] - mag_adj,
't_eff': ('(%.0f)' if t_est else '%.0f') % t_eff,
'fe_h': m[cols['fe_h']], 'log_g': m[cols['log_g']],
'mag_v': m[cols['mag_v']]}]})
return measures, star_meas
def _extract_stars(self):
""" extract stars from image, count "digital units" after bg substraction, calc centroid x, y """
# scaled to 0-1 and in grayscale
data = np.mean(self.image.astype(np.float64)/(2**self.bits-1), axis=2)
mean, median, std = sigma_clipped_stats(data, sigma=3.0)
thumbnail = self.image.shape[1] == 128
bsize = 4 if thumbnail else 20
assert self.image.shape[1] in (128, 2048), 'unsupported image size'
if thumbnail:
daofind = DAOStarFinder(fwhm=3.5, threshold=5.*std, sharplo=.3, sharphi=1.3, roundlo=-.8, roundhi=1.3)
else:
daofind = DAOStarFinder(fwhm=28, threshold=12.*std, sharplo=-3.0, sharphi=3.0, roundlo=-3.0, roundhi=3.0)
sources = daofind(data - median)
positions = np.transpose((sources['xcentroid'], sources['ycentroid']))
if self.debug and (DEBUG_EXTRACTION or DEBUG_CHANNELS):
norm = ImageNormalize(stretch=SqrtStretch())
if DEBUG_CHANNELS:
if 0:
f, (b, g, r) = plt.subplots(1, 3)
b.imshow(self.image[:, :, 0].astype(np.float64) / (2 ** self.bits - 1), cmap='Greys', norm=norm)
g.imshow(self.image[:, :, 1].astype(np.float64) / (2 ** self.bits - 1), cmap='Greys', norm=norm)
r.imshow(self.image[:, :, 2].astype(np.float64) / (2 ** self.bits - 1), cmap='Greys', norm=norm)
b.set_title('blue')
g.set_title('green')
r.set_title('red')
else:
f, (w, b_r, g_r) = plt.subplots(1, 3, sharex=True, sharey=True)
w.imshow(data, cmap='Greys_r', norm=norm)
br = (self.image[:, :, 0].astype(np.float64) - self.image[:, :, 2].astype(np.float64)) / (2 ** self.bits - 1)
gr = (self.image[:, :, 1].astype(np.float64) - self.image[:, :, 2].astype(np.float64)) / (2 ** self.bits - 1)
b_r.imshow(br - np.min(br), cmap='Greys_r', norm=norm)
g_r.imshow(gr - np.min(gr), cmap='Greys_r', norm=norm)
w.set_title('white')
b_r.set_title('blue - red')
g_r.set_title('green - red')
plt.tight_layout()
plt.show()
else:
plt.imshow(data, cmap='Greys', norm=norm)
apertures = CircularAperture(positions, r=bsize)
apertures.plot(color='blue', lw=1.5, alpha=0.5)
plt.show()
stars = []
img_median = np.median(self.image.reshape((-1, 3)), axis=0)
for i, (x, y) in enumerate(positions):
if thumbnail:
size = 4
elif sources[i]['flux'] > 16:
size = 30
elif sources[i]['flux'] > 6:
size = 25
elif sources[i]['flux'] > 2:
size = 20
else:
size = 17
(b, b0), (g, g0), (r, r0) = self._count_du(x, y, size=2*size+1, bg=img_median)
if b is not None and (b-b0 > 0 or g-g0 > 0 or r-r0 > 0):
                # TODO: add black level removal info to .lbl files?
                #  - an unknown black level was removed in the sensor; param tables suggest 168, but that doesn't work for all images
# - for now, add something here but then adjust at match_stars based on brightest & dimmest
#bg = 168/8 # 168
#b0, g0, r0 = b0 + bg, g0 + bg, r0 + bg
mag = -2.5 * math.log10((b+b0) * (g+g0) * (r+r0) / b0 / g0 / r0) / 3
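                # This is an instrument magnitude: the geometric mean of the
                # per-channel (signal + background) / background flux ratios,
                # mapped onto the usual -2.5 * log10 magnitude scale.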
stars.append({"du_b": b, "du_g": g, "du_r": r, "x": x, "y": y, "mag": mag, "size": size})
else:
print('discarded [%d, %d]' % (x, y))
return stars
def _count_du(self, x, y, size=5, bg=None):
wmrg = size//4
mmrg = 1 if bg is None else 0
mask = ImageProc.bsphkern(size + 2*mmrg)
if bg is None:
mask[0, :] = 0
mask[-1, :] = 0
mask[:, 0] = 0
mask[:, -1] = 0
        mask = mask.astype(bool)  # plain bool: np.bool is removed in modern NumPy
mr = size//2 + mmrg
mn = size + 2*mmrg
h, w, _ = self.image.shape
x, y = int(round(x)), int(round(y))
if h-y+wmrg <= mr or w-x+wmrg <= mr or x+wmrg < mr or y+wmrg < mr:
return zip([None] * 3, [None] * 3)
win = self.image[max(0, y-mr):min(h, y+mr+1), max(0, x-mr):min(w, x+mr+1), :].reshape((-1, 3))
mx0, mx1 = -min(0, x-mr), mn - (max(w, x+mr+1) - w)
my0, my1 = -min(0, y-mr), mn - (max(h, y+mr+1) - h)
mask = mask[my0:my1, mx0:mx1].flatten()
bg = np.mean(win[np.logical_not(mask), :], axis=0) if bg is None else bg
if False:
tot = np.sum(win[mask, :], axis=0)
tot_bg = bg * np.sum(mask)
tot = np.max(np.array((tot, tot_bg)), axis=0)
# tried to take into account thumbnail mean resizing after gamma correction,
# also assuming no saturation of original pixels because of motion blur
# => better results if tune Camera->emp_coef instead
resizing_gain = (1/self.resize_scale)**2
g = self.applied_gamma
# ([sum over i in n: (bg+s_i)**g] / n) ** (1/g)
# => cannot compensate for gamma correction as signal components not summable anymore,
# only possible if assume that only a single pixel has signal (or some known distribution of signal?)
# signal in a single, non-saturating pixel (conflicting assumptions):
adj_tot = (((tot-tot_bg+bg)**g*resizing_gain) - (resizing_gain-1)*bg**g)**(1/g) - bg
signal = adj_tot
else:
#signal = tot - tot_bg
signal = np.clip(np.sum(win[mask, :] - bg, axis=0), 0, np.inf)
return zip(signal, bg)
def _match_stars(self, stars, max_dist=0.05, max_mag_diff=2.0, mag_cutoff=3.0, plot=False):
""" match stars based on proximity """
merge_lim = 4
all_stars, cols = Stars.flux_density(self.q, self.cam[0], array=True, undistorted=True,
mag_cutoff=mag_cutoff+merge_lim, order_by='mag_v')
if self.debug:
db_img = np.sqrt(Stars.flux_density(self.q, self.cam[0], mag_cutoff=10.0))
# override some star data, change None => nan
for i, st in enumerate(all_stars):
for j in range(len(st)):
st[j] = np.nan if st[j] is None else st[j]
if st[cols['id']] in self.override_star_data:
for f in ('mag_v', 'mag_b', 't_eff', 'log_g', 'fe_h'):
od = self.override_star_data[st[cols['id']]]
if f in od:
all_stars[i][cols[f]] = od[f]
# merge close stars
all_stars = np.array(all_stars)
points = np.array([(s[cols['ix']], s[cols['iy']]) for s in all_stars])
D = tools.distance_mx(points, points)
radius = 10 if self.cam[0].width > 300 else 2
db_stars = []
added = set()
for i, s in enumerate(all_stars):
if i in added:
continue
I = tuple(set(np.where(
np.logical_and(D[i, :] < radius, all_stars[:, cols['mag_v']]-merge_lim < s[cols['mag_v']])
)[0]) - added)
cluster = [None]*(max(cols.values())+1)
            cluster[cols['id']] = tuple(all_stars[I, cols['id']].astype(int))
amag_v = 10**(-all_stars[I, cols['mag_v']]/2.5)
amag_b = 10**(-all_stars[I, cols['mag_b']]/2.5)
cluster[cols['mag_v']] = -2.5*np.log10(np.sum(amag_v))
cluster[cols['mag_b']] = -2.5*np.log10(np.sum(amag_b))
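            # Cluster magnitudes combine the members' linear fluxes: merging two
            # equally bright stars, for example, gives -2.5*log10(2) ~= -0.753,
            # i.e. the cluster is about 0.75 mag brighter than either member.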
for c in ('ix', 'iy', 'dec', 'ra', 't_eff', 'fe_h', 'log_g'):
E = np.where(all_stars[I, cols[c]] != None)[0]
                cluster[cols[c]] = np.sum(amag_v[E] * all_stars[I, cols[c]][E]) / np.sum(amag_v[E])
import numpy as np
import scipy.signal as sig
from scipy.integrate import cumtrapz
from .rotate import inst2earth, _rotate_vel2body
import warnings
class CalcMotion(object):
"""
A 'calculator' for computing the velocity of points that are
rigidly connected to an ADV-body with an IMU.
Parameters
----------
advo : `adv_raw<dolfyn.adv.base.adv_raw>`
The IMU-adv object that will be used to compute motion.
accel_filtfreq : float
the frequency at which to high-pass filter the acceleration
signal to remove low-frequency drift.
vel_filtfreq : float (optional)
a second frequency to high-pass filter the integrated
acceleration. (default: 1/3 of accel_filtfreq)
Examples
--------
>>> from dolfyn.adv import api as avm
>>> from dolfyn.adv import motion as avmot
>>> dat = avm.read_nortek('my_data_file.vec')
>>> mcalc = avmot.CalcMotion(dat)
# Calculate the motion of a point that is (.3, .1, .06) meters
# from the adv-body origin:
>>> mot = mcalc([.3, .1, .06])
"""
def __init__(self, advo,
accel_filtfreq=1. / 30,
vel_filtfreq=None,
to_earth=True):
self.advo = advo
self.accel_filtfreq = accel_filtfreq
if vel_filtfreq is None:
vel_filtfreq = accel_filtfreq / 3
self.accelvel_filtfreq = vel_filtfreq
self.to_earth = to_earth
self._set_Accel()
self._set_AccelStable()
self.AngRt = advo.AngRt # No copy because not modified.
def _set_Accel(self, ):
advo = self.advo
if advo.props['coord_sys'] == 'inst':
self.Accel = np.einsum('ijk,ik->jk',
advo.orientmat,
advo.Accel)
elif self.advo.props['coord_sys'] == 'earth':
self.Accel = advo.Accel.copy()
else:
raise Exception(("Invalid coordinate system '%s'. The coordinate "
"system must either be 'earth' or 'inst' to "
"perform motion correction.")
% (self.advo.props['coord_sys'], ))
def _set_AccelStable(self, ):
"""
"""
self.AccelStable = acc = self.Accel.copy()
if self.accel_filtfreq == 0:
acc[:] = acc.mean(-1)[..., None]
else:
flt = sig.butter(1, self.accel_filtfreq / (self.advo.fs / 2))
for idx in range(3):
acc[idx] = sig.filtfilt(flt[0], flt[1], acc[idx])
def __call__(self, vec):
"""
Calculate the motion of the point specified by vec (in meters,
in the adv-body coordinate system).
Parameters
----------
vec : |np.ndarray| (len(3) or 3 x M)
The vector in meters (or set of vectors) from the
body-origin (center of head end-cap) to the point of
interest (in the body coord-sys).
Returns
-------
umot : |np.ndarray| (3 x M x N_time)
The motion (velocity) array (3, n_time).
"""
return self.calc_uacc() + self.calc_urot(np.array(vec), )
def calc_uacc(self, ):
"""
Calculates the translational velocity from the acceleration
signal.
Returns
-------
uacc : |np.ndarray| (3 x n_time)
The acceleration-induced velocity array (3, n_time).
"""
samp_freq = self.advo.fs
hp = self.Accel - self.AccelStable
dat = np.concatenate((np.zeros(list(hp.shape[:-1]) + [1]),
cumtrapz(hp, dx=1. / samp_freq)), axis=-1)
if self.accelvel_filtfreq > 0:
filt_freq = self.accelvel_filtfreq
            # 2nd-order Butterworth filter (filtfilt applies it forward and
            # backward, doubling the effective order and giving zero phase).
filt = sig.butter(2, float(filt_freq) / (samp_freq / 2))
for idx in range(hp.shape[0]):
dat[idx] = dat[idx] - sig.filtfilt(filt[0], filt[1], dat[idx])
return dat
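    # Integration sketch for calc_uacc above: the high-passed acceleration
    # hp = Accel - AccelStable is integrated with the trapezoid rule
    # (cumtrapz, dx = 1/fs), a leading zero sample keeps the result aligned
    # with the input, and the optional second high-pass (filtfilt) removes
    # the drift that integration re-introduces.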
def calc_urot(self, vec, to_earth=None):
"""
Calculate the induced velocity due to rotations of the instrument
about the IMU center.
Parameters
----------
vec : |np.ndarray| (len(3) or 3 x M)
The vector in meters (or vectors) from the body-origin
(center of head end-cap) to the point of interest (in the
body coord-sys).
Returns
-------
urot : |np.ndarray| (3 x M x N_time)
The rotation-induced velocity array (3, n_time).
"""
if to_earth is None:
to_earth = self.to_earth
dimflag = False
if vec.ndim == 1:
vec = vec.copy().reshape((3, 1))
dimflag = True
# Correct for the body->imu distance.
# The nortek_body2imu vector is subtracted because of
# vector addition:
# body2head = body2imu + imu2head
# Thus:
# imu2head = body2head - body2imu
vec = vec - self.advo.body2imu_vec[:, None]
# This motion of the point *vec* due to rotations should be the
# cross-product of omega (rotation vector) and the vector.
# u=dz*omegaY-dy*omegaZ,v=dx*omegaZ-dz*omegaX,w=dy*omegaX-dx*omegaY
# where vec=[dx,dy,dz], and AngRt=[omegaX,omegaY,omegaZ]
urot = np.array([(vec[2][:, None] * self.AngRt[1] -
vec[1][:, None] * self.AngRt[2]),
(vec[0][:, None] * self.AngRt[2] -
vec[2][:, None] * self.AngRt[0]),
(vec[1][:, None] * self.AngRt[0] -
vec[0][:, None] * self.AngRt[1]),
])
if to_earth:
urot = np.einsum('jik,jlk->ilk', self.advo['orientmat'], urot)
if dimflag:
return urot[:, 0, :]
return urot
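# Quick numeric check of the cross-product convention used in calc_urot
# (sketch): a point at vec = [1, 0, 0] m under a pure yaw rate
# AngRt = [0, 0, w] gives, from the component formulas above,
#   u = 0*0 - 0*w = 0,   v = 1*w - 0*0 = w,   w-comp = 0*0 - 1*0 = 0
# i.e. a velocity of magnitude |w| * r in the +y direction, consistent with
# urot = AngRt x vec.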
def _calc_probe_pos(advo, separate_probes=False):
"""
!!!Currently this only works for Nortek Vectors!
In the future, we could use the transformation matrix (and a
probe-length lookup-table?)
"""
# According to the ADV_DataSheet, the probe-length radius is
# 8.6cm @ 120deg from probe-stem axis. If I subtract 1cm
# (!!!checkthis) to get acoustic receiver center, this is
# 7.6cm. In the coordinate sys of the center of the probe
# then, the positions of the centers of the receivers is:
if advo.make_model == 'Nortek VECTOR' and separate_probes:
r = 0.076
# The angle between the x-y plane and the probes
phi = -30. * np.pi / 180.
theta = np.array([0., 120., 240.]) * np.pi / \
180. # The angles of the probes from the x-axis.
return (np.dot(advo.props['body2head_rotmat'].T,
np.array([r * np.cos(theta),
r * np.sin(theta),
r * np.tan(phi) * np.ones(3)])) +
advo.props['body2head_vec'][:, None]
)
else:
return advo.props['body2head_vec']
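# Geometry check for the separate-probes branch above (sketch): with
# r = 0.076 m and phi = -30 deg, each receiver center sits at
# z = r * tan(phi) ~= -0.044 m below the probe plane, spaced at 120-degree
# intervals (theta = 0, 120, 240 deg) around the stem axis.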
def correct_motion(advo,
accel_filtfreq=1. / 30,
vel_filtfreq=None,
to_earth=True,
separate_probes=False, ):
"""
This function performs motion correction on an IMU-ADV data
object. The IMU and ADV data should be tightly synchronized and
contained in a single data object.
Parameters
----------
advo : dolfyn.adv.adv class
accel_filtfreq : float
the frequency at which to high-pass filter the acceleration
signal to remove low-frequency drift.
vel_filtfreq : float (optional)
a second frequency to high-pass filter the integrated
acceleration. (default: 1/3 of accel_filtfreq)
to_earth : bool (optional, default: True)
All variables in the advo.props['rotate_vars'] list will be
rotated into either the earth frame (to_earth=True) or the
instrument frame (to_earth=False).
separate_probes : bool (optional, default: False)
a flag to perform motion-correction at the probe tips, and
perform motion correction in beam-coordinates, then transform
back into XYZ/earth coordinates. This correction seems to be
    lower than the noise levels of the ADV, so the default is to not
use it (False).
Returns
-------
This function returns None, it operates on the input data object,
``advo``. The following attributes are added to `advo`:
``uraw`` is the uncorrected velocity
``urot`` is the rotational component of the head motion (from
AngRt)
``uacc`` is the translational component of the head motion (from
Accel)
``AccelStable`` is the low-pass filtered Accel signal
The primary velocity vector attribute, ``_u``, is motion corrected
such that:
_u = uraw + urot + uacc
The signs are correct in this equation. The measured velocity
induced by head-motion is *in the opposite direction* of the head
motion. i.e. when the head moves one way in stationary flow, it
measures a velocity in the opposite direction. Therefore, to
remove the motion from the raw signal we *add* the head motion.
Notes
-----
Acceleration signals from inertial sensors are notorious for
having a small bias that can drift slowly in time. When
integrating these signals to estimate velocity the bias is
amplified and leads to large errors in the estimated
velocity. There are two methods for removing these errors,
1) high-pass filter the acceleration signal prior and/or after
integrating. This implicitly assumes that the low-frequency
translational velocity is zero.
2) provide a slowly-varying reference position (often from a GPS)
to an IMU that can use the signal (usually using Kalman
filters) to debias the acceleration signal.
Because method (1) removes `real` low-frequency acceleration,
method (2) is more accurate. However, providing reference position
estimates to undersea instruments is practically challenging and
expensive. Therefore, lacking the ability to use method (2), this
function utilizes method (1).
For deployments in which the ADV is mounted on a mooring, or other
semi-fixed structure, the assumption of zero low-frequency
translational velocity is a reasonable one. However, for
deployments on ships, gliders, or other moving objects it is
not. The measured velocity, after motion-correction, will still
hold some of this contamination and will be a sum of the ADV
motion and the measured velocity on long time scales. If
low-frequency motion is known separate from the ADV (e.g. from a
bottom-tracking ADP, or from a ship's GPS), it may be possible to
remove that signal from the ADV signal in post-processing. The
accuracy of this approach has not, to my knowledge, been tested
yet.
Examples
--------
>>> from dolfyn.adv import api as avm
>>> dat = avm.read_nortek('my_data_file.vec')
>>> avm.motion.correct_motion(dat)
``dat`` will now have motion-corrected.
"""
if hasattr(advo, 'urot'):
raise Exception('The data object already appears to have been motion corrected.')
if advo.props['coord_sys'] != 'inst':
raise Exception('The data object must be in the instrument frame to be motion corrected.')
if vel_filtfreq is None:
vel_filtfreq = accel_filtfreq / 3
# Be sure the velocity data has been rotated to the body frame.
_rotate_vel2body(advo)
# Create the motion 'calculator':
calcobj = CalcMotion(advo,
accel_filtfreq=accel_filtfreq,
vel_filtfreq=vel_filtfreq,
to_earth=to_earth)
##########
# Calculate the translational velocity (from the Accel):
advo.groups['orient'].add('uacc')
advo.uacc = calcobj.calc_uacc()
# Copy AccelStable to the adv-object.
advo.groups['orient'].add('AccelStable')
advo.AccelStable = calcobj.AccelStable
##########
# Calculate rotational velocity (from AngRt):
pos = _calc_probe_pos(advo, separate_probes)
# Calculate the velocity of the head (or probes).
urot = calcobj.calc_urot(pos, to_earth=False)
if separate_probes:
# The head->beam transformation matrix
transMat = advo.config.head.get('TransMatrix', None)
# The body->head transformation matrix
rmat = advo.props['body2head_rotmat']
# 1) Rotate body-coordinate velocities to head-coord.
urot = np.dot(rmat, urot)
# 2) Rotate body-coord to beam-coord (einsum),
# 3) Take along beam-component (diagonal),
# 4) Rotate back to head-coord (einsum),
urot = np.einsum('ij,kj->ik',
transMat,
np.diagonal(np.einsum('ij,jkl->ikl',
                                               np.linalg.inv(transMat),
                                               urot)))
# Author: <NAME>
# Contributors: <NAME>, <NAME>
import numpy as np
import torch
from nose.tools import raises
from cgnet.feature.utils import (GaussianRBF, PolynomialCutoffRBF,
ShiftedSoftplus, _AbstractRBFLayer)
from cgnet.feature.statistics import GeometryStatistics
from cgnet.feature.feature import GeometryFeature, Geometry
# Define sizes for a pseudo-dataset
frames = np.random.randint(10, 30)
beads = np.random.randint(5, 10)
g = Geometry(method='torch')
@raises(NotImplementedError)
def test_radial_basis_function_len():
# Make sure that a NotImplementedError is raised if an RBF layer
# does not have a __len__() method
# Here, we use the _AbstractRBFLayer base class as our RBF
abstract_RBF = _AbstractRBFLayer()
# Next, we check to see if the NotImplementedError is raised
# This is done using the decorator above, because we cannot
# use nose.tools.assert_raises directly on special methods
len(abstract_RBF)
def test_radial_basis_function():
# Make sure radial basis functions are consistent with manual calculation
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance)
gauss_layer = rbf.forward(distances)
# Manually calculate expansion with numpy
# according to the following formula:
# e_k (r_j - r_i) = exp(- \gamma (\left \| r_j - r_i \right \| - \mu_k)^2)
# with centers mu_k calculated on a uniform grid between
# zero and the distance cutoff and gamma as a scaling parameter.
centers = np.linspace(low_cutoff, high_cutoff,
n_gaussians).astype(np.float64)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = np.exp(gamma * magnitude_squared)
# Shapes and values need to be the same
np.testing.assert_equal(centers.shape, rbf.centers.shape)
np.testing.assert_allclose(gauss_layer.numpy(), gauss_manual, rtol=1e-5)
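# Numeric sanity check of the expansion formula above (sketch): with a single
# center mu = 0 and variance = 1 (so gamma = -0.5), a distance of 2.0 expands
# to exp(-0.5 * 2.0**2) = exp(-2.0) ~= 0.1353.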
def test_radial_basis_function_distance_masking():
# Makes sure that if a distance mask is used, the corresponding
# expanded distances returned by GaussianRBF are zero
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
n_gaussians = np.random.randint(5, 10)
neighbor_cutoff = np.abs(np.random.rand())
neighbors, neighbor_mask = g.get_neighbors(distances,
cutoff=neighbor_cutoff)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance)
gauss_layer = rbf.forward(distances, distance_mask=neighbor_mask)
# Lastly, we check to see that the application of the mask is correct
# against a manual calculation and masking
centers = np.linspace(low_cutoff, high_cutoff, n_gaussians)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = torch.tensor(np.exp(gamma * magnitude_squared))
gauss_manual = gauss_manual * neighbor_mask[:, :, :, None].double()
np.testing.assert_array_almost_equal(gauss_layer.numpy(),
gauss_manual.numpy())
def test_radial_basis_function_normalize():
# Tests to make sure that the output of GaussianRBF is properly
# normalized if 'normalize_output' is specified as True
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
variance = np.random.random() + 1
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
# Calculate Gaussian expansion using the implemented layer
rbf = GaussianRBF(high_cutoff=high_cutoff, low_cutoff=low_cutoff,
n_gaussians=n_gaussians, variance=variance,
normalize_output=True)
gauss_layer = rbf.forward(distances)
# Manually calculate expansion with numpy
# according to the following formula:
# e_k (r_j - r_i) = exp(- \gamma (\left \| r_j - r_i \right \| - \mu_k)^2)
# with centers mu_k calculated on a uniform grid between
# zero and the distance cutoff and gamma as a scaling parameter.
centers = np.linspace(low_cutoff, high_cutoff,
n_gaussians).astype(np.float64)
gamma = -0.5 / variance
distances = np.expand_dims(distances, axis=3)
magnitude_squared = (distances - centers)**2
gauss_manual = np.exp(gamma * magnitude_squared)
# manual output normalization
gauss_manual = gauss_manual / np.sum(gauss_manual, axis=3)[:, :, :, None]
# Shapes and values need to be the same
np.testing.assert_equal(centers.shape, rbf.centers.shape)
np.testing.assert_allclose(gauss_layer.numpy(), gauss_manual, rtol=1e-5)
def test_polynomial_cutoff_rbf():
# Make sure the polynomial_cutoff radial basis functions are consistent with
# manual calculations
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = np.random.randn(frames, beads, beads - 1).astype(np.float64)
# Define random parameters for the polynomial_cutoff RBF
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
alpha = np.random.uniform(0.1, 1.0)
# Calculate Gaussian expansion using the implemented layer
polynomial_cutoff_rbf = PolynomialCutoffRBF(high_cutoff=high_cutoff,
low_cutoff=low_cutoff,
n_gaussians=n_gaussians,
alpha=alpha,
tolerance=1e-8)
polynomial_cutoff_rbf_layer = polynomial_cutoff_rbf.forward(
torch.tensor(distances))
# Manually calculate expansion with numpy
# First, we compute the centers and the scaling factors
centers = np.linspace(np.exp(-high_cutoff), np.exp(-low_cutoff),
n_gaussians).astype(np.float64)
beta = np.power(((2/n_gaussians) * (1-np.exp(-high_cutoff))), -2)
# Next, we compute the gaussian portion
exp_distances = np.exp(-alpha * np.expand_dims(distances, axis=3))
magnitude_squared = np.power(exp_distances - centers, 2)
gauss_manual = np.exp(-beta * magnitude_squared)
# Next, we compute the polynomial modulation
zeros = np.zeros_like(distances)
modulation = np.where(distances < high_cutoff,
1 - 6.0 * np.power((distances/high_cutoff), 5)
+ 15.0 * np.power((distances/high_cutoff), 4)
- 10.0 * np.power((distances/high_cutoff), 3),
zeros)
modulation = np.expand_dims(modulation, axis=3)
polynomial_cutoff_rbf_manual = modulation * gauss_manual
# Map tiny values to zero
polynomial_cutoff_rbf_manual = np.where(
np.abs(polynomial_cutoff_rbf_manual) > polynomial_cutoff_rbf.tolerance,
polynomial_cutoff_rbf_manual,
np.zeros_like(polynomial_cutoff_rbf_manual)
)
# centers and output values need to be the same
np.testing.assert_allclose(centers,
polynomial_cutoff_rbf.centers, rtol=1e-5)
np.testing.assert_allclose(polynomial_cutoff_rbf_layer.numpy(),
polynomial_cutoff_rbf_manual, rtol=1e-5)
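# Note on the modulation polynomial used above: 1 - 6x^5 + 15x^4 - 10x^3 with
# x = d / high_cutoff is the complement of the quintic smoothstep. It equals 1
# at d = 0 and 0 at d = high_cutoff, with vanishing first and second
# derivatives at both ends, so the expansion is cut off smoothly rather than
# truncated.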
def test_polynomial_cutoff_rbf_distance_masking():
# Makes sure that if a distance mask is used, the corresponding
# expanded distances returned by PolynomialCutoffRBF are zero
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = torch.randn((frames, beads, beads - 1), dtype=torch.float64)
# Define random parameters for the RBF
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
alpha = np.random.uniform(0.1, 1.0)
neighbor_cutoff = np.abs(np.random.rand())
neighbors, neighbor_mask = g.get_neighbors(distances,
cutoff=neighbor_cutoff)
# Calculate Gaussian expansion using the implemented layer
polynomial_cutoff_rbf = PolynomialCutoffRBF(high_cutoff=high_cutoff,
low_cutoff=low_cutoff,
n_gaussians=n_gaussians,
alpha=alpha,
tolerance=1e-8)
polynomial_cutoff_rbf_layer = polynomial_cutoff_rbf.forward(
torch.tensor(distances),
distance_mask=neighbor_mask)
# Manually calculate expansion with numpy
# First, we compute the centers and the scaling factors
centers = np.linspace(np.exp(-high_cutoff), np.exp(-low_cutoff),
n_gaussians).astype(np.float64)
beta = np.power(((2/n_gaussians) * (1-np.exp(-high_cutoff))), -2)
# Next, we compute the gaussian portion
exp_distances = np.exp(-alpha * np.expand_dims(distances, axis=3))
magnitude_squared = np.power(exp_distances - centers, 2)
gauss_manual = np.exp(-beta * magnitude_squared)
# Next, we compute the polynomial modulation
zeros = np.zeros_like(distances)
modulation = np.where(distances < high_cutoff,
1 - 6.0 * np.power((distances/high_cutoff), 5)
+ 15.0 * np.power((distances/high_cutoff), 4)
- 10.0 * np.power((distances/high_cutoff), 3),
zeros)
modulation = np.expand_dims(modulation, axis=3)
polynomial_cutoff_rbf_manual = modulation * gauss_manual
# Map tiny values to zero
polynomial_cutoff_rbf_manual = np.where(
np.abs(polynomial_cutoff_rbf_manual) > polynomial_cutoff_rbf.tolerance,
polynomial_cutoff_rbf_manual,
np.zeros_like(polynomial_cutoff_rbf_manual)
)
polynomial_cutoff_rbf_manual = torch.tensor(
polynomial_cutoff_rbf_manual) * neighbor_mask[:, :, :, None].double()
np.testing.assert_array_almost_equal(polynomial_cutoff_rbf_layer.numpy(),
polynomial_cutoff_rbf_manual.numpy())
def test_polynomial_cutoff_rbf_normalize():
# Tests to make sure that the output of PolynomialCutoffRBF is properly
# normalized if 'normalize_output' is specified as True
# Distances need to have shape (n_batch, n_beads, n_neighbors)
distances = np.random.randn(frames, beads, beads - 1).astype(np.float64)
# Define random parameters for the polynomial_cutoff RBF
n_gaussians = np.random.randint(5, 10)
high_cutoff = np.random.uniform(5.0, 10.0)
low_cutoff = np.random.uniform(0.0, 4.0)
alpha = np.random.uniform(0.1, 1.0)
# Calculate Gaussian expansion using the implemented layer
polynomial_cutoff_rbf = PolynomialCutoffRBF(high_cutoff=high_cutoff,
low_cutoff=low_cutoff,
n_gaussians=n_gaussians,
alpha=alpha,
normalize_output=True,
tolerance=1e-8)
polynomial_cutoff_rbf_layer = polynomial_cutoff_rbf.forward(
torch.tensor(distances))
# Manually calculate expansion with numpy
# First, we compute the centers and the scaling factors
    centers = np.linspace(np.exp(-high_cutoff), np.exp(-low_cutoff),
                          n_gaussians).astype(np.float64)
"""
Contains functions for the Dueling-Thompson sampling acquisition function by Gonzalez et al (2017).
"""
import numpy as np
import tensorflow as tf
import gpflow
from .. import fourier_features
def uniform_grid(input_dims, num_discrete_per_dim, low, high):
"""
    Returns an array containing every point of a uniform lattice with
    num_discrete_per_dim evenly spaced values per dimension, across input_dims
    dimensions (i.e. the Cartesian product of the per-dimension grids).
    :param input_dims: int
    :param num_discrete_per_dim: int
    :param low: float, lower bound of each dimension's grid
    :param high: float, upper bound of each dimension's grid
:return: tensor of shape (num_discrete_per_dim ** input_dims, input_dims)
"""
num_points = num_discrete_per_dim ** input_dims
out = np.zeros([num_points, input_dims])
discrete_points = np.linspace(low, high, num_discrete_per_dim)
for i in range(num_points):
for dim in range(input_dims):
val = num_discrete_per_dim ** (dim)
out[i, dim] = discrete_points[int((i // val) % num_discrete_per_dim)]
return out
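# Example (sketch): uniform_grid(2, 3, 0.0, 1.0) enumerates the 3**2 = 9 points
# of a 2-D lattice over {0.0, 0.5, 1.0} per dimension; the first dimension
# varies fastest, so the rows begin
#   [0.0, 0.0], [0.5, 0.0], [1.0, 0.0], [0.0, 0.5], ...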
def combinations(points):
"""
Given d-dimensional points, return all pair combinations of those points
:param points: tensor of shape (n, d)
:return: tensor of shape (n ** 2, d * 2)
"""
n = points.shape[0]
d = points.shape[1]
    out = np.zeros((n*n, d*2))
    # enumerate all ordered pairs (points[i], points[j]); row i*n + j holds
    # the concatenation [points[i], points[j]]
    for i in range(n):
        for j in range(n):
            out[i*n + j, :d] = points[i]
            out[i*n + j, d:] = points[j]
    return out
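# Example (sketch): for points = [[0.], [1.]], combinations(points) returns the
# four ordered pairs [[0., 0.], [0., 1.], [1., 0.], [1., 1.]] -- the (x, x')
# duel candidates that the Dueling-Thompson acquisition scores.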
"""Tests for the OpenCL kernels."""
from .conftest import context_available
from ..cl import get_context
import numpy as np
import pyopencl as cl
from pyopencl import mem_flags as mf
import pathlib
import pytest
@pytest.fixture(scope="module")
def context():
"""Create a context using the default platform, prefer GPU."""
return get_context()
@context_available
@pytest.fixture(scope="module")
def queue(context):
"""Create a CL command queue."""
return cl.CommandQueue(context)
@context_available
@pytest.fixture(scope="module")
def program(context):
"""Create a program object for the Euler integrator."""
kernel_source = open(
pathlib.Path(__file__).parent.absolute() /
"../cl/euler.cl").read()
return cl.Program(context, kernel_source).build()
class TestUpdateDisplacement:
"""Test the displacement update."""
@context_available
def test_update_displacement(self, context, queue, program):
"""Test basic displacement update."""
u = np.zeros(3)
nnodes = 1
force = np.array([1.0, 2.0, 3.0], dtype=np.float64)
bc_types = np.array([0, 0, 0], dtype=np.intc)
bc_values = np.array([0, 0, 0], dtype=np.float64)
displacement_bc_scale = 0
dt = 1
# Set buffers
# Read only
bc_types_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_types)
bc_values_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_values)
force_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=force)
# Read write
u_d = cl.Buffer(
context, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=u)
# Build kernels
update_displacement_kernel = program.update_displacement
update_displacement_kernel(
queue, (3 * nnodes,), None,
force_d, u_d, bc_types_d, bc_values_d,
np.float64(displacement_bc_scale), np.float64(dt))
cl.enqueue_copy(queue, u, u_d)
assert np.all(u == force)
@context_available
def test_update_displacement2(self, context, queue, program):
"""Test displacement update."""
u = np.zeros(3)
nnodes = 1
force = np.array([1.0, 2.0, 3.0], dtype=np.float64)
bc_types = np.array([0, 0, 0], dtype=np.intc)
bc_values = np.array([0, 0, 0], dtype=np.float64)
displacement_bc_scale = 0
dt = 2.0
# Set buffers
# Read only
bc_types_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_types)
bc_values_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_values)
force_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=force)
# Read write
u_d = cl.Buffer(
context, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=u)
# Build kernels
update_displacement_kernel = program.update_displacement
update_displacement_kernel(
queue, (3 * nnodes,), None,
force_d, u_d, bc_types_d, bc_values_d,
np.float64(displacement_bc_scale), np.float64(dt))
cl.enqueue_copy(queue, u, u_d)
assert np.all(u == 2.0*force)
@context_available
def test_update_displacement3(self, context, queue, program):
"""Test displacement update with displacement boundary conditions."""
u = np.zeros(3)
nnodes = 1
force = np.array([1.0, 2.0, 3.0], dtype=np.float64)
bc_types = np.array([1, 1, 0], dtype=np.intc)
bc_values = np.array([0.0, 0.0, 0.0], dtype=np.float64)
displacement_bc_scale = 1.0
dt = 2.0
# Set buffers
# Read only
bc_types_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_types)
bc_values_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=bc_values)
force_d = cl.Buffer(
context, mf.READ_ONLY | mf.COPY_HOST_PTR,
hostbuf=force)
# Read write
u_d = cl.Buffer(
context, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=u)
# Build kernels
update_displacement_kernel = program.update_displacement
update_displacement_kernel(
queue, (3 * nnodes,), None,
force_d, u_d, bc_types_d, bc_values_d,
np.float64(displacement_bc_scale), np.float64(dt))
cl.enqueue_copy(queue, u, u_d)
u_expected = np.array([0.0, 0.0, 6.0])
        assert np.all(u == u_expected)
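    # Expected kernel semantics, inferred from the three tests above rather
    # than from euler.cl itself (sketch): for each degree of freedom i,
    #   u[i] = displacement_bc_scale * bc_values[i]   if bc_types[i] != 0
    #   u[i] = u[i] + dt * force[i]                   otherwise
    # which reproduces u == [0, 0, 6] in test 3 (dt = 2, force = [1, 2, 3]).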
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME> (Nagoya University)
# based on PyTorch implementation for WaveNet vocoder by <NAME> (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
from distutils.util import strtobool
import logging
import math
import os
import sys
import numpy as np
import torch
import torch.multiprocessing as mp
from utils import find_files, read_hdf5, read_txt, write_hdf5
from gru_vae import GRU_RNN, sampling_vae_batch
from dtw_c import dtw_c as dtw
np.set_printoptions(threshold=np.inf)
def main():
parser = argparse.ArgumentParser()
# decode setting
parser.add_argument("--feats", required=True,
type=str, help="list or directory of source eval feat files")
parser.add_argument("--feats_trg", required=True,
type=str, help="list or directory of source eval feat files")
parser.add_argument("--stats_src", required=True,
type=str, help="hdf5 file including source statistics")
parser.add_argument("--stats_trg", required=True,
type=str, help="hdf5 file including target statistics")
parser.add_argument("--stats_jnt",
type=str, help="hdf5 file including target statistics")
parser.add_argument("--model", required=True,
type=str, help="model file")
parser.add_argument("--config", required=True,
type=str, help="configure file")
parser.add_argument("--n_gpus", default=1,
type=int, help="number of gpus")
parser.add_argument("--n_smpl_dec", default=300,
type=int, help="number of gpus")
parser.add_argument("--outdir", required=True,
type=str, help="directory to save generated samples")
parser.add_argument("--write_gv", default=False,
type=strtobool, help="flag to write gv stats")
# other setting
parser.add_argument("--seed", default=1,
type=int, help="seed number")
parser.add_argument("--GPU_device", default=0,
type=int, help="selection of GPU device")
parser.add_argument("--verbose", default=1,
type=int, help="log level")
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_device)
# check directory existence
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# set log level
    if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.outdir + "/decode.log")
logging.getLogger().addHandler(logging.StreamHandler())
logging.warn("logging is disabled.")
# fix seed
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# load config
config = torch.load(args.config)
# get source feat list
if os.path.isdir(args.feats):
feat_list = sorted(find_files(args.feats, "*.h5"))
elif os.path.isfile(args.feats):
feat_list = read_txt(args.feats)
else:
logging.error("--feats should be directory or list.")
sys.exit(1)
# get target feat list
if os.path.isdir(args.feats_trg):
feat_trg_list = sorted(find_files(args.feats_trg, "*.h5"))
elif os.path.isfile(args.feats_trg):
feat_trg_list = read_txt(args.feats_trg)
else:
logging.error("--feats_trg should be directory or list.")
sys.exit(1)
# prepare the file list for parallel decoding
feat_lists = np.array_split(feat_list, args.n_gpus)
feat_lists = [f_list.tolist() for f_list in feat_lists]
feat_trg_lists = np.array_split(feat_trg_list, args.n_gpus)
feat_trg_lists = [f_list.tolist() for f_list in feat_trg_lists]
spk_src = os.path.basename(os.path.dirname(feat_lists[0][0]))
spk_trg = os.path.basename(os.path.dirname(feat_trg_lists[0][0]))
gv_mean_src = read_hdf5(args.stats_src, "/gv_range_mean")[1:]
gv_mean_trg = read_hdf5(args.stats_trg, "/gv_range_mean")[1:]
# define gpu decode function
def gpu_decode(feat_list, feat_trg_list, gpu, cvlist=None, mcdlist=None, mcdstdlist=None, mcdpowlist=None, mcdpowstdlist=None, cvlist_src=None, mcdlist_src=None, mcdstdlist_src=None, mcdpowlist_src=None, mcdpowstdlist_src=None, cvlist_trg=None, mcdlist_trg=None, mcdstdlist_trg=None, mcdpowlist_trg=None, mcdpowstdlist_trg=None, lat_dist_rmse_enc_list=None, lat_dist_cosim_enc_list=None, lat_dist_rmse_pri_list=None, lat_dist_cosim_pri_list=None):
with torch.cuda.device(gpu):
mean_jnt = torch.FloatTensor(read_hdf5(args.stats_jnt, "/mean_feat_org_lf0_jnt")[config.stdim:]).cuda()
std_jnt = torch.FloatTensor(read_hdf5(args.stats_jnt, "/scale_feat_org_lf0_jnt")[config.stdim:]).cuda()
# define model and load parameters
logging.info("model")
logging.info(config)
with torch.no_grad():
model_encoder = GRU_RNN(
in_dim=config.in_dim,
out_dim=config.lat_dim*2,
hidden_layers=config.hidden_layers,
hidden_units=config.hidden_units,
kernel_size=config.kernel_size,
dilation_size=config.dilation_size,
scale_out_flag=False)
model_decoder = GRU_RNN(
in_dim=config.lat_dim+2,
out_dim=config.out_dim,
hidden_layers=config.hidden_layers,
hidden_units=config.hidden_units,
kernel_size=config.kernel_size,
dilation_size=config.dilation_size,
scale_in_flag=False)
model_encoder.load_state_dict(torch.load(args.model)["model_encoder"])
model_decoder.load_state_dict(torch.load(args.model)["model_decoder"])
model_encoder.cuda()
model_decoder.cuda()
model_encoder.eval()
model_decoder.eval()
for param in model_encoder.parameters():
param.requires_grad = False
for param in model_decoder.parameters():
param.requires_grad = False
logging.info(model_encoder)
logging.info(model_decoder)
init_pp = np.zeros((1,1,config.lat_dim*2))
y_in_pp = torch.FloatTensor(init_pp).cuda()
y_in_src = y_in_trg = torch.unsqueeze(torch.unsqueeze((0-mean_jnt)/std_jnt,0),0)
for feat_file, feat_trg_file in zip(feat_list, feat_trg_list):
# convert mcep
logging.info("cvmcep " + feat_file + " " + feat_trg_file)
feat = read_hdf5(feat_file, "/feat_org_lf0")
feat_trg = read_hdf5(feat_trg_file, "/feat_org_lf0")
logging.info(feat.shape)
logging.info(feat_trg.shape)
with torch.no_grad():
lat_src, _, _ = model_encoder(torch.FloatTensor(feat).cuda(), y_in_pp, clamp_vae=True, lat_dim=config.lat_dim)
lat_feat = sampling_vae_batch(lat_src.unsqueeze(0).repeat(args.n_smpl_dec,1,1), lat_dim=config.lat_dim)
lat_feat = torch.mean(lat_feat, 0)
lat_trg, _, _ = model_encoder(torch.FloatTensor(feat_trg).cuda(), y_in_pp, clamp_vae=True, lat_dim=config.lat_dim)
lat_feat_trg = sampling_vae_batch(lat_trg.unsqueeze(0).repeat(args.n_smpl_dec,1,1), lat_dim=config.lat_dim)
lat_feat_trg = torch.mean(lat_feat_trg, 0)
src_code = np.zeros((lat_feat.shape[0],2))
trg_code = np.zeros((lat_feat.shape[0],2))
trg_trg_code = np.zeros((lat_feat_trg.shape[0],2))
src_code[:,0] = 1
trg_code[:,1] = 1
trg_trg_code[:,1] = 1
src_code = torch.FloatTensor(src_code).cuda()
trg_code = torch.FloatTensor(trg_code).cuda()
trg_trg_code = torch.FloatTensor(trg_trg_code).cuda()
cvmcep, _, _ = model_decoder(torch.cat((trg_code, lat_feat),1), y_in_trg)
cvmcep = np.array(cvmcep.cpu().data.numpy(), dtype=np.float64)
cvmcep_src, _, _ = model_decoder(torch.cat((src_code, lat_feat),1), y_in_src)
cvmcep_src = np.array(cvmcep_src.cpu().data.numpy(), dtype=np.float64)
cvmcep_trg, _, _ = model_decoder(torch.cat((trg_trg_code, lat_feat_trg),1), y_in_trg)
cvmcep_trg = np.array(cvmcep_trg.cpu().data.numpy(), dtype=np.float64)
logging.info(cvmcep.shape)
logging.info(cvmcep_trg.shape)
cvlist.append(np.var(cvmcep[:,1:], axis=0))
cvlist_src.append(np.var(cvmcep_src[:,1:], axis=0))
cvlist_trg.append(np.var(cvmcep_trg[:,1:], axis=0))
logging.info(len(cvlist))
spcidx_src = read_hdf5(feat_file, "/spcidx_range")[0]
mcep_trg = read_hdf5(feat_trg_file, "/mcepspc_range")
_, _, _, mcdpow_arr = dtw.dtw_org_to_trg(np.array(cvmcep[np.array(spcidx_src),:], dtype=np.float64), np.array(mcep_trg[:,:], dtype=np.float64))
_, _, _, mcd_arr = dtw.dtw_org_to_trg(np.array(cvmcep[np.array(spcidx_src),1:], dtype=np.float64), np.array(mcep_trg[:,1:], dtype=np.float64))
mcdpow_mean = np.mean(mcdpow_arr)
mcdpow_std = np.std(mcdpow_arr)
mcd_mean = np.mean(mcd_arr)
mcd_std = np.std(mcd_arr)
logging.info("mcdpow: %.6f dB +- %.6f" % (mcdpow_mean, mcdpow_std))
logging.info("mcd: %.6f dB +- %.6f" % (mcd_mean, mcd_std))
mcdpowlist.append(mcdpow_mean)
mcdpowstdlist.append(mcdpow_std)
mcdlist.append(mcd_mean)
mcdstdlist.append(mcd_std)
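                # The per-frame distortion arrays come from dtw_c; "mcdpow"
                # includes the 0th (power) cepstral coefficient while "mcd"
                # drops it (note the [:, 1:] slices above). The values are
                # assumed to follow the standard mel-cepstral distortion
                # definition, MCD = (10 / ln 10) * sqrt(2 * sum_d (c_d - c'_d)^2) dB.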
mcep_src = read_hdf5(feat_file, "/mcepspc_range")
_, mcdpow_arr = dtw.calc_mcd(np.array(mcep_src[:,:], dtype=np.float64), np.array(cvmcep_src[np.array(spcidx_src),:], dtype=np.float64))
_, mcd_arr = dtw.calc_mcd(np.array(mcep_src[:,1:], dtype=np.float64), np.array(cvmcep_src[np.array(spcidx_src),1:], dtype=np.float64))
mcdpow_mean = np.mean(mcdpow_arr)
mcdpow_std = np.std(mcdpow_arr)
mcd_mean = np.mean(mcd_arr)
mcd_std = np.std(mcd_arr)
logging.info("mcdpow_src: %.6f dB +- %.6f" % (mcdpow_mean, mcdpow_std))
logging.info("mcd_src: %.6f dB +- %.6f" % (mcd_mean, mcd_std))
mcdpowlist_src.append(mcdpow_mean)
mcdpowstdlist_src.append(mcdpow_std)
mcdlist_src.append(mcd_mean)
mcdstdlist_src.append(mcd_std)
spcidx_trg = read_hdf5(feat_trg_file, "/spcidx_range")[0]
_, mcdpow_arr = dtw.calc_mcd(np.array(mcep_trg[:,:], dtype=np.float64), np.array(cvmcep_trg[np.array(spcidx_trg),:], dtype=np.float64))
_, mcd_arr = dtw.calc_mcd(np.array(mcep_trg[:,1:], dtype=np.float64), np.array(cvmcep_trg[np.array(spcidx_trg),1:], dtype=np.float64))
mcdpow_mean = np.mean(mcdpow_arr)
mcdpow_std = np.std(mcdpow_arr)
mcd_mean = np.mean(mcd_arr)
mcd_std = np.std(mcd_arr)
logging.info("mcdpow_trg: %.6f dB +- %.6f" % (mcdpow_mean, mcdpow_std))
logging.info("mcd_trg: %.6f dB +- %.6f" % (mcd_mean, mcd_std))
mcdpowlist_trg.append(mcdpow_mean)
mcdpowstdlist_trg.append(mcdpow_std)
mcdlist_trg.append(mcd_mean)
mcdstdlist_trg.append(mcd_std)
with torch.no_grad():
spcidx_src = torch.LongTensor(spcidx_src).cuda()
spcidx_trg = torch.LongTensor(spcidx_trg).cuda()
trj_lat_src = np.array(torch.index_select(lat_src,0,spcidx_src).cpu().data.numpy(), dtype=np.float64)
trj_lat_trg = np.array(torch.index_select(lat_trg,0,spcidx_trg).cpu().data.numpy(), dtype=np.float64)
aligned_lat_srctrg, _, _, _ = dtw.dtw_org_to_trg(trj_lat_src, trj_lat_trg)
lat_dist_srctrg = np.mean(np.sqrt(np.mean((aligned_lat_srctrg-trj_lat_trg)**2, axis=0)))
_, _, lat_cdist_srctrg, _ = dtw.dtw_org_to_trg(trj_lat_trg, trj_lat_src, mcd=0)
aligned_lat_trgsrc, _, _, _ = dtw.dtw_org_to_trg(trj_lat_trg, trj_lat_src)
lat_dist_trgsrc = np.mean(np.sqrt(np.mean((aligned_lat_trgsrc-trj_lat_src)**2, axis=0)))
_, _, lat_cdist_trgsrc, _ = dtw.dtw_org_to_trg(trj_lat_src, trj_lat_trg, mcd=0)
logging.info("%lf %lf %lf %lf" % (lat_dist_srctrg, lat_cdist_srctrg, lat_dist_trgsrc, lat_cdist_trgsrc))
lat_dist_rmse = (lat_dist_srctrg+lat_dist_trgsrc)/2
lat_dist_cosim = (lat_cdist_srctrg+lat_cdist_trgsrc)/2
lat_dist_rmse_enc_list.append(lat_dist_rmse)
lat_dist_cosim_enc_list.append(lat_dist_cosim)
logging.info("lat_dist_enc: %.6f %.6f" % (lat_dist_rmse, lat_dist_cosim))
trj_lat_src = np.array(torch.index_select(lat_feat,0,spcidx_src).cpu().data.numpy(), dtype=np.float64)
trj_lat_trg = np.array(torch.index_select(lat_feat_trg,0,spcidx_trg).cpu().data.numpy(), dtype=np.float64)
aligned_lat_srctrg, _, _, _ = dtw.dtw_org_to_trg(trj_lat_src, trj_lat_trg)
lat_dist_srctrg = np.mean(np.sqrt(np.mean((aligned_lat_srctrg-trj_lat_trg)**2, axis=0)))
_, _, lat_cdist_srctrg, _ = dtw.dtw_org_to_trg(trj_lat_trg, trj_lat_src, mcd=0)
aligned_lat_trgsrc, _, _, _ = dtw.dtw_org_to_trg(trj_lat_trg, trj_lat_src)
lat_dist_trgsrc = np.mean(np.sqrt(np.mean((aligned_lat_trgsrc-trj_lat_src)**2, axis=0)))
_, _, lat_cdist_trgsrc, _ = dtw.dtw_org_to_trg(trj_lat_src, trj_lat_trg, mcd=0)
logging.info("%lf %lf %lf %lf" % (lat_dist_srctrg, lat_cdist_srctrg, lat_dist_trgsrc, lat_cdist_trgsrc))
lat_dist_rmse = (lat_dist_srctrg+lat_dist_trgsrc)/2
lat_dist_cosim = (lat_cdist_srctrg+lat_cdist_trgsrc)/2
lat_dist_rmse_pri_list.append(lat_dist_rmse)
lat_dist_cosim_pri_list.append(lat_dist_cosim)
logging.info("lat_dist_pri: %.6f %.6f" % (lat_dist_rmse, lat_dist_cosim))
# parallel decode training
with mp.Manager() as manager:
gpu = 0
processes = []
cvlist = manager.list()
mcdlist = manager.list()
mcdstdlist = manager.list()
mcdpowlist = manager.list()
mcdpowstdlist = manager.list()
cvlist_src = manager.list()
mcdlist_src = manager.list()
mcdstdlist_src = manager.list()
mcdpowlist_src = manager.list()
mcdpowstdlist_src = manager.list()
cvlist_trg = manager.list()
mcdlist_trg = manager.list()
mcdstdlist_trg = manager.list()
mcdpowlist_trg = manager.list()
mcdpowstdlist_trg = manager.list()
lat_dist_rmse_enc_list = manager.list()
lat_dist_cosim_enc_list = manager.list()
lat_dist_rmse_pri_list = manager.list()
lat_dist_cosim_pri_list = manager.list()
for i, (feat_list, feat_trg_list) in enumerate(zip(feat_lists, feat_trg_lists)):
logging.info(i)
p = mp.Process(target=gpu_decode, args=(feat_list, feat_trg_list, gpu, cvlist, mcdlist, mcdstdlist, mcdpowlist, mcdpowstdlist, cvlist_src, mcdlist_src, mcdstdlist_src, mcdpowlist_src, mcdpowstdlist_src, cvlist_trg, mcdlist_trg, mcdstdlist_trg, mcdpowlist_trg, mcdpowstdlist_trg, lat_dist_rmse_enc_list, lat_dist_cosim_enc_list, lat_dist_rmse_pri_list, lat_dist_cosim_pri_list,))
p.start()
processes.append(p)
gpu += 1
if (i + 1) % args.n_gpus == 0:
gpu = 0
# wait for all process
for p in processes:
p.join()
# calculate cv_gv statistics
cvgv_mean = np.mean(np.array(cvlist), axis=0)
cvgv_var = np.var(np.array(cvlist), axis=0)
cvgvsrc_mean = np.mean(np.array(cvlist_src), axis=0)
cvgvsrc_var = np.var(np.array(cvlist_src), axis=0)
cvgvtrg_mean = np.mean(np.array(cvlist_trg), axis=0)
cvgvtrg_var = np.var(np.array(cvlist_trg), axis=0)
logging.info(args.stats_src)
logging.info(args.stats_trg)
#logging.info(gv_mean_trg)
logging.info("mcdpow: %.6f dB (+- %.6f) +- %.6f (+- %.6f)" % (np.mean(np.array(mcdpowlist)),np.std(np.array(mcdpowlist)),np.mean(np.array(mcdpowstdlist)),np.std(np.array(mcdpowstdlist))))
logging.info("mcd: %.6f dB (+- %.6f) +- %.6f (+- %.6f)" % (np.mean(np.array(mcdlist)),np.std(np.array(mcdlist)),np.mean(np.array(mcdstdlist)),np.std(np.array(mcdstdlist))))
#logging.info(cvgv_mean)
logging.info("%lf +- %lf" % (np.mean(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean_trg)))), np.std(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean_trg))))))
logging.info("mcdpow_src: %.6f dB (+- %.6f) +- %.6f (+- %.6f)" % (np.mean(np.array(mcdpowlist_src)),np.std(np.array(mcdpowlist_src)),np.mean(np.array(mcdpowstdlist_src)),np.std(np.array(mcdpowstdlist_src))))
logging.info("mcd_src: %.6f dB (+- %.6f) +- %.6f (+- %.6f)" % (np.mean(np.array(mcdlist_src)),np.std( | np.array(mcdlist_src) | numpy.array |
# -*- coding: utf-8 -*--
"""
Created on Tue Oct 23 09:42:24 2018
@author: William
"""
import re #import regex
import os
path_to_cpp = ''
#OS walk to find the cpp compilation
for root, dirs, files in os.walk(".", topdown=False):
for branch in dirs:
if 'ssa_cpp' in branch:
path_to_cpp = os.path.join(root, branch)
if path_to_cpp != '':
try:
cwd = os.getcwd()
os.chdir(path_to_cpp)
import ssa_translation
os.chdir(cwd)
except:
os.chdir(cwd)
try:
from snapgene_reader import snapgene_file_to_dict, snapgene_file_to_seqrecord
except:
pass
import time
import json, codecs
from scipy import sparse
from scipy.stats import pearsonr
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpatches
import matplotlib.animation as animation
from matplotlib.collections import PatchCollection
from matplotlib import cm
from matplotlib import gridspec
from matplotlib.patches import Ellipse
#import scipy.stats.trim_mean as tmean
from scipy.stats import kde
try:
from Bio import SeqIO
from Bio import Entrez
except:
print('BioPython is not installed, polling genbank will not be possible')
pass
import translation_models as models
class rSNAPsim():
"""
The Single Molecule Simulator (SMS) provides a python class for running
single molecule mRNA translation simulations
When presented with a valid protein sequence the SMS can find open reading frames
and simulate intensity trajectories from translation of the protein with given fluorescent tags.
*model description*
link to paper here / image
*main functions*
-open_seq_file(filepath), opens a txt or .gb file and gets the sequence
-get_orfs(nt_sequence, min_codons), returns open reading frames of a given
sequence and a minimum codon length per protein
-get_temporal_proteins(), gets the proteins after get_orfs
    -analyze_poi(aa_seq,nt_seq), analyzes the proteins of interest for
    codon sensitivity and elongation rates
    -__.poi(), class to contain proteins of interest after analysis
    -run_default(), runs get_orfs, get_temporal_proteins, and analyze_poi
    with the first protein found in the sequence
*attributes*
**gene_sequence_str** = string of the nucleotide sequence
**tag_dict** = dictionary with various types of fluorescent tag epitopes
**tag_full** = dictionary of full tag sequences
**aa_keys** = amino acid single letter keys
**codon_types** = flag dictionary of which amino acids are set to Wild-type, fast, or slow
**aa_table** = dictionary of amino acids
**aa_table_r** = reverse dictionary (amino acid letters are the keys)
**strGeneCopy** = dictionary of wild-type tRNA copy numbers
**strGeneCopy_fast** = dictionary of fast tRNA copy numbers
**strGeneCopy_slow** = dictionary of slow tRNA copy numbers
**slow_codons_value** = list of slowest codon tRNA copy numbers
**fast_codons_value** = list of fastest codon tRNA copy numbers
**sensitivity_fast_slow** = list of sensitivity for amino acids
    **poi** = class container for proteins of interest
**orfs** = dictionary of open reading frames with keys 1,2,3
**seq_str** = sequence string
**proteins** = dictionary of proteins detected in the sequence by ORF
**tagged_proteins** = dictionary of proteins that were detected and tagged
*POI*
    Protein of interest has the following attributes:
**aa_seq** = amino acid sequence
**nt_seq** = nucleotide sequence
**gene_length** = length of the gene
**tag_length** = length of the tags
**total_length** = total length of the full amino acid sequence
**name** = name of the gene
    **tag_types** = which types of tags the protein has
    **tag_epitopes** = type of tags and epitope lists per tag
    **codon_sensitivity** = per-amino-acid codon sensitivity of the protein
    **CAI** = codon adaptation index
    **CAI_codons** = mean codon adaptation value per codon
*ssa*
The ssa container class has the following attributes:
**no_ribosomes** = number of ribosomes
**n_traj** = number of trajectories
    **k** = all k elongation rates (calculated from the codon sequence)
**no_rib_per_mrna** = number of ribosomes per mRNA strand on average
**rib_density** = ribosome density
**rib_means** = ribosome means
**rib_vec** = raw ribosome location matrix for each trajectory
**intensity_vec** = fluorescence intensities
**time_vec_fixed** = the time vector
**start_time** = the time the simulation was started
**evaluating_inhibitor** = was there an inhibitor present?
**evaluating_frap** = was the simulation subjected to a FRAP test
**time_inhibit** = the time of the perturbation
**autocorr_vec** = autocorrelation vector of intensities
**mean_autocorr** = the average autocorrelations, averaged over trajectories
**error_autocorr** = the standard deviation of the autocorrelation
    **dwell_time** = average time a ribosome stays on the mRNA strand, calculated by the simulation
**ke_sim** = the calculated average elongation rate from the simulations
"""
def __init__(self):
self.gene_sequence_str = ''
self.tag_dict = {'T_SunTag':'EELLSKNYHLENEVARLKK',
'T_Flag':'DYKDDDDK',
'T_Hemagglutinin':'YPYDVPDYA'}
self.tag_colors = {'T_SunTag':'green',
'T_Flag':'blue',
'T_Hemagglutinin':'blue'}
self.tag_full = {'T_Flag':('ATGGACTACAAGGACGACGACGACAAAGGTGAC'
'TACAAAGATGATGACGATAAAGGCGACTATA'
'AGGACGATGACGACAAGGGCGGAAACTCACTGA'
'TCAAGGAAAACATGCGGATGAAGGTGGTGAT'
'GGAGGGCTCCGTGAATGGTCACCAGTTCAAGTG'
'CACCGGAGAGGGAGAGGGAAACCCGTACATG'
'GGAACTCAGACCATGCGCATTAAGGTCATCGAA'
'GGAGGTCCGCTGCCGTTCGCTTTCGATATCC'
'TGGCCACTTCGTTCGGAGGAGGGTCGCGCACGTTC'
'ATCAAGTACCCGAAGGGAATCCCGGACTT'
'CTTTAAGCAGTCATTCCCGGAAGGATTCACTTGGG'
'AACGGGTGACCCGGTATGAAGATGGAGGT'
'GTGGTGACTGTCATGCAAGATACTTCGCTGGAGGATGGG'
'TGCCTCGTGTACCACGTCCAAGTCC'
'GCGGAGTGAATTTCCCGTCCAACGGACCAGTGATGCAG'
'AAAAAGACGAAGGGTTGGGAACCTAA'
'TACTGAAATGATGTACCCCGCAGACGGAGGGCTGAGGG'
'GCTACACCCACATGGCGCTGAAGGTC'
'GACGGAGGAGATTACAAGGATGACGACGATAAGCAACAA'
'GATTACAAAGACGATGATGACAAGG'
'GCCAGCAGGGCGACTACAAGGACGACGACGACAAGCAG'
'CAGGACTACAAAGATGACGATGATAA'
'AGGAGGAGGACATCTGTCCTGTTCGTTCGTGACCACCT'
'ACAGATCAAAGAAAACCGTGGGAAAC'
'ATCAAGATGCCGGGCATTCATGCCGTCGACCACCGCCT'
'GGAGCGGCTCGAAGAATCAGACAATG'
'AGATGTTCGTCGTGCAAAGAGAACATGCCGTGGCCAAGTT'
'CGCGGGACTGGGAGGCGGTGGAGG'
'CGATTACAAAGACGATGATGACAAGGGTGACTATAAAGA'
'CGACGATGACAAAGGGGATTACAAG'
'GATGATGATGATAAGGGAGGCGGTGGATCAGGTGGAG'
'GAGGTTCACTGCAG')}
self.aa_keys = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F',
'P', 'S', 'T', 'W', 'Y', 'V', '*']
self.codon_types = dict(zip(self.aa_keys, np.ones((1, 21)).flatten().astype(int).tolist()))
self.aa_table = {
'ATA':'I', 'ATC':'I', 'ATT':'I', 'ATG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACT':'T',
'AAC':'N', 'AAT':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGT':'S', 'AGA':'R', 'AGG':'R',
'CTA':'L', 'CTC':'L', 'CTG':'L', 'CTT':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCT':'P',
'CAC':'H', 'CAT':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGT':'R',
'GTA':'V', 'GTC':'V', 'GTG':'V', 'GTT':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCT':'A',
'GAC':'D', 'GAT':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGT':'G',
'TCA':'S', 'TCC':'S', 'TCG':'S', 'TCT':'S',
'TTC':'F', 'TTT':'F', 'TTA':'L', 'TTG':'L',
'TAC':'Y', 'TAT':'Y', 'TAA':'*', 'TAG':'*',
'TGC':'C', 'TGT':'C', 'TGA':'*', 'TGG':'W',
'AUA':'I', 'AUC':'I', 'AUU':'I', 'AUG':'M',
'ACA':'T', 'ACC':'T', 'ACG':'T', 'ACU':'T',
'AAC':'N', 'AAU':'N', 'AAA':'K', 'AAG':'K',
'AGC':'S', 'AGU':'S', 'AGA':'R', 'AGG':'R',
'CUA':'L', 'CUC':'L', 'CUG':'L', 'CUU':'L',
'CCA':'P', 'CCC':'P', 'CCG':'P', 'CCU':'P',
'CAC':'H', 'CAU':'H', 'CAA':'Q', 'CAG':'Q',
'CGA':'R', 'CGC':'R', 'CGG':'R', 'CGU':'R',
'GUA':'V', 'GUC':'V', 'GUG':'V', 'GUU':'V',
'GCA':'A', 'GCC':'A', 'GCG':'A', 'GCU':'A',
'GAC':'D', 'GAU':'D', 'GAA':'E', 'GAG':'E',
'GGA':'G', 'GGC':'G', 'GGG':'G', 'GGU':'G',
'UCA':'S', 'UCC':'S', 'UCG':'S', 'UCU':'S',
'UUC':'F', 'UUU':'F', 'UUA':'L', 'UUG':'L',
'UAC':'Y', 'UAU':'Y', 'UAA':'*', 'UAG':'*',
'UGC':'C', 'UGU':'C', 'UGA':'*', 'UGG':'W',}
self.aa_table_r = {'A':['GCA', 'GCC', 'GCG', 'GCT','GCU'],
'R':['CGA', 'CGC', 'CGG', 'CGT','AGG','AGA','CGU'],
'N':['AAC', 'AAT','AAU'],
'D':['GAC', 'GAT','GAU'],
'C':['TGC', 'TGT','UGC','UGU'],
'Q':['CAA', 'CAG'],
'E':['GAA', 'GAG'],
                           'G':['GGT', 'GGC', 'GGA', 'GGG', 'GGU'],
'H':['CAC', 'CAT','CAU'],
'I':['ATT', 'ATC', 'ATA','AUU','AUC','AUA'],
'L':['CTA', 'CTC', 'CTG', 'CTT', 'TTA', 'TTG','CUA', 'CUC', 'CUG', 'CUU', 'UUA', 'UUG'],
'K':['AAA', 'AAG'],
'M':['ATG','AUG'],
'F':['TTC', 'TTT','UUC','UUU'],
'P':['CCT', 'CCC', 'CCG', 'CCA','CCU'],
                           'S':['TCA', 'TCC', 'TCG', 'TCT','AGC','AGT','UCA','UCC','UCG','UCU','AGU'],
'T':['ACA', 'ACC', 'ACG', 'ACT','ACU'],
'W':['TGG','UGG'],
'Y':['TAT', 'TAC','UAC','UAU'],
'V':['GTA', 'GTC', 'GTT','GTG','GUG','GUU','GUC','GUA'],
'*':['TGA', 'TAG', 'TAA','UGA','UAG','UAA']
}
self.strGeneCopy = {'TTT': 17.6, 'TCT': 15.2, 'TAT': 12.2, 'TGT': 10.6, 'TTC': 20.3,
'TCC': 17.7, 'TAC': 15.3, 'TGC': 12.6, 'TTA': 7.7, 'TCA': 12.2,
'TAA': 1.0, 'TGA': 1.6, 'TTG': 12.9, 'TCG': 4.4, 'TAG': 0.8,
'TGG': 13.2, 'CTT': 13.2, 'CCT': 17.5, 'CAT': 10.9, 'CGT': 4.5,
'CTC': 19.6, 'CCC': 19.8, 'CAC': 15.1, 'CGC': 10.4, 'CTA': 7.2,
'CCA': 16.9, 'CAA': 12.3, 'CGA': 6.2, 'CTG': 39.6, 'CCG': 6.9,
'CAG': 34.2, 'CGG': 11.4, 'ATT': 16.0, 'ACT': 13.1, 'AAT': 17.0,
'AGT': 12.1, 'ATC': 20.8, 'ACC': 18.9, 'AAC': 19.1, 'AGC': 19.5,
'ATA': 7.5, 'ACA': 15.1, 'AAA': 24.4, 'AGA': 12.2, 'ATG': 22.0,
'ACG': 6.1, 'AAG': 31.9, 'AGG': 12.0, 'GTT': 11.0, 'GCT': 18.4,
'GAT': 21.8, 'GGT': 10.8, 'GTC': 14.5, 'GCC': 27.7, 'GAC': 25.1,
'GGC': 22.2, 'GTA': 7.1, 'GCA': 15.8, 'GAA': 29.0, 'GGA': 16.5,
'GTG': 28.1, 'GCG': 7.4, 'GAG': 39.6, 'GGG': 16.5}
# add the U codons
for key in list(self.strGeneCopy.keys()):
if 'T' in key:
val = self.strGeneCopy[key]
newkey = key.replace('T','U')
self.strGeneCopy[newkey] = val
self.strGeneCopy_fast = {'GCT': 27.7, 'GCC': 27.7, 'GCA': 27.7, 'GCG': 27.7, #A
'CGT': 12.2, 'CGC': 12.2, 'CGA': 12.2, 'CGG': 12.2,
'AGA': 12.2, 'AGG': 12.2, # R
'AAT': 19.1, 'AAC': 19.1, #N
'GAT': 25.1, 'GAC': 25.1, # D
'TGT': 12.6, 'TGC': 12.6, # C
'CAA': 34.2, 'CAG': 34.2, # Q
'GAA': 39.6, 'GAG': 39.6, #E
'GGT': 22.2, 'GGC': 22.2, 'GGA': 22.2, 'GGG': 22.2, # G
'CAT': 15.1, 'CAC': 15.1, # H
'ATT': 20.8, 'ATC': 20.8, 'ATA': 20.8, # I
'TTA': 39.6, 'TTG': 39.6, 'CTT': 39.6, 'CTC': 39.6,
'CTA': 39.6, 'CTG': 39.6, # L
'AAA': 31.9, 'AAG': 31.9, # K
'ATG': 22.0, #M
'TTT': 20.3, 'TTC': 20.3, # F
'CCT': 19.8, 'CCC': 19.8, 'CCA': 19.8, 'CCG': 19.8, # P
'TCT': 19.5, 'TCC': 19.5, 'TCA': 19.5, 'TCG': 19.5,
'AGT': 19.5, 'AGC': 19.5, # S
'ACT': 18.9, 'ACC': 18.9, 'ACA': 18.9, 'ACG': 18.9, # T
'TGG': 13.2, #W
'TAT': 15.3, 'TAC': 15.3, # Y
'GTT': 28.1, 'GTC': 28.1, 'GTA':28.1, 'GTG': 28.1, # V
'TAA': 1.6, 'TAG': 1.6, 'TGA':1.6 #STOP
}
for key in list(self.strGeneCopy_fast.keys()):
if 'T' in key:
val = self.strGeneCopy_fast[key]
newkey = key.replace('T','U')
self.strGeneCopy_fast[newkey] = val
self.strGeneCopy_slow = {'GCT': 7.4, 'GCC': 7.4, 'GCA': 7.4, 'GCG': 7.4, #A
'CGT': 4.5, 'CGC': 4.5, 'CGA': 4.5, 'CGG': 4.5,
'AGA':4.5, 'AGG':4.5, #R
'AAT': 17.0, 'AAC':17.0, #%N
'GAT': 21.8, 'GAC': 21.8, #D
'TGT': 10.6, 'TGC':10.6, #C
'CAA': 12.3, 'CAG': 12.3, #Q
'GAA': 29.0, 'GAG': 29.0, #E
'GGT': 10.8, 'GGC': 10.8, 'GGA': 10.8, 'GGG': 10.8, #G
'CAT': 10.9, 'CAC':10.9, #H
'ATT': 7.5, 'ATC': 7.5, 'ATA': 7.5, #I
'TTA': 7.2, 'TTG':7.2, 'CTT': 7.2, 'CTC': 7.2,
'CTA': 7.2, 'CTG': 7.2, #L
'AAA': 24.4, 'AAG': 24.4, #K
'ATG': 22.0, #M
'TTT': 17.6, 'TTC': 17.6, #F
'CCT': 6.9, 'CCC': 6.9, 'CCA': 6.9, 'CCG': 6.9, #P
'TCT': 4.4, 'TCC': 4.4, 'TCA': 4.4, 'TCG': 4.4,
'AGT': 4.4, 'AGC': 4.4, #S
'ACT': 6.1, 'ACC': 6.1, 'ACA': 6.1, 'ACG': 6.1,#T
'TGG': 13.2, #W
'TAT': 12.2, 'TAC': 12.2, #Y
'GTT': 7.1, 'GTC':7.1, 'GTA': 7.1, 'GTG': 7.1, # V
'TAA': 0.8, 'TAG': 0.8, 'TGA': 0.8 #STOP CODON}
}
for key in list(self.strGeneCopy_slow.keys()):
if 'T' in key:
val = self.strGeneCopy_slow[key]
newkey = key.replace('T','U')
self.strGeneCopy_slow[newkey] = val
self.fast_codons_value = [27.7, 12.2, 19.1, 25.1, 12.6, 34.2, 39.6, 22.2, 15.1,
20.8, 39.6, 31.9, 22, 20.3, 19.8, 19.5,
18.9, 13.2, 15.3, 28.1, 1.6]
self.slow_codons_value = [7.4, 4.5, 17, 21.8, 10.6, 12.3, 29, 10.8, 10.9, 7.5, 7.2,
24.4, 22, 17.6, 6.9, 4.4, 6.1, 13.2, 12.2, 7.1, .8]
fullcodonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA',
'GCU', 'CGU', 'AAU', 'GAU', 'UGU', 'CAA', 'GAA', 'GGU', 'CAU',
                         'AUU', 'UUA', 'AAA', 'AUG', 'UUU', 'CCU', 'UCU',
'ACU', 'UGG', 'UAU', 'GUU', 'UAA', ]
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT',
'ATT', 'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT',
'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.append(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def __update_sensitivity(self):
"""
updates sensitivities for the GUI implementation call
"""
self.fast_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.append(self.strGeneCopy[codon])
self.fast_codons_value.append(max(values))
for codon in codons:
self.strGeneCopy_fast[codon] = max(values)
self.slow_codons_value = []
for key in self.aa_keys:
values = []
codons = self.aa_table_r[key]
for codon in codons:
values.append(self.strGeneCopy_slow[codon])
self.slow_codons_value.append(min(values))
for codon in codons:
self.strGeneCopy_slow[codon] = min(values)
codonkeys = ['GCT', 'CGT', 'AAT', 'GAT', 'TGT', 'CAA', 'GAA', 'GGT', 'CAT', 'ATT',
'TTA', 'AAA', 'ATG', 'TTT', 'CCT', 'TCT', 'ACT', 'TGG', 'TAT', 'GTT', 'TAA']
self.sensitivity_fast_slow = []
for i in range(len(codonkeys)):
self.sensitivity_fast_slow.append(self.strGeneCopy_fast[codonkeys[i]] / self.strGeneCopy_slow[codonkeys[i]])
def load_tags(self):
with open("custom_tags.txt", "r") as f:
raw = f.readlines()
previous_tags = []
for line in raw:
if line != '\n':
previous_tags.append(line)
for line in previous_tags:
custom_tag = line.strip('\n').split('---')
if custom_tag[0] not in self.tag_dict.keys():
self.tag_dict[custom_tag[0]] = custom_tag[2]
self.tag_full[custom_tag[0]] = custom_tag[1]
def add_tag(self,nt_seq,name):
'''
add a custom tag sequence
'''
f= open("custom_tags.txt","r")
raw = f.readlines()
previous_tags = []
for line in raw:
if line != '\n':
previous_tags.append(line)
if not set(nt_seq.lower()).issubset( set(['a','t','c','g','u'])):
print('invalid NT sequence')
f.close()
return
aa_seq = self.nt2aa(nt_seq)
newtag = name + '---' + nt_seq.lower() + '---' + aa_seq.upper() + '\n'
if newtag not in previous_tags:
previous_tags.append(newtag)
f.close()
f= open("custom_tags.txt","w+")
for item in previous_tags:
f.write('%s' % item)
f.close()
def nt2aa(self, nt_seq):
'''
Translates nucleotides sequences to amino acid sequences
*args*
**nt_seq**, nucleotide sequence as a string
*returns*
**aa_seq**, amino acid sequence as string
'''
aa = ''
for i in range(0, len(nt_seq), 3):
aa += self.aa_table[nt_seq[i:i+3]]
return aa
def get_orfs(self, nt_seq='', min_codons=80):
'''
Returns open reading frames of the nucleotide sequence given
orfs = {'1':[proteins],
'2':[proteins],
'3':[proteins]}
*keyword args*
**nt_seq**, nucleotide sequence as a string. If left blank uses
the self.sequence_str
**min_codons**, minimum amount of codons to be considered
a protein in the open reading frame
'''
if nt_seq == '':
nt_seq = self.sequence_str
allstarts = np.array([m.start() for m in re.finditer('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))', nt_seq)])
#allsegments = re.findall('(?=A[TU]G((?:.{3})+?)[TU](?:AG|AA|GA))',self.sequence_str)
allstops = np.array([m.start() for m in re.finditer('(?=[TU](?:AG|AA|GA))', nt_seq)])
start_frames = allstarts%3
stop_frames = allstops%3
min_len = min_codons*3
orf1_starts = allstarts[np.where(start_frames == 0)]
orf2_starts = allstarts[np.where(start_frames == 1)]
orf3_starts = allstarts[np.where(start_frames == 2)]
orf1_stops = allstops[np.where(stop_frames == 0)]
orf2_stops = allstops[np.where(stop_frames == 1)]
orf3_stops = allstops[np.where(stop_frames == 2)]
self.starts = [orf1_starts, orf2_starts, orf3_starts]
self.stops = [orf1_stops, orf2_stops, orf3_stops]
self.orfs = {'1':[], '2':[], '3':[]}
laststop = 0
for start in orf1_starts:
nextstop = orf1_stops[np.where(orf1_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['1'].append((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf2_starts:
nextstop = orf2_stops[np.where(orf2_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['2'].append((start, nextstop))
laststop = nextstop
laststop = 0
for start in orf3_starts:
nextstop = orf3_stops[np.where(orf3_stops > start)[0][0]]
if (nextstop - start) > min_len:
if nextstop != laststop:
self.orfs['3'].append((start, nextstop))
laststop = nextstop
def get_k_construct(self, nt_seq, k_init, k_elong_mean, codon_types=None):
'''
Returns the k_elongation rates of a given nucleotide sequence under constructed conditions
given some sort of key describing which amino acids are slow, fast or natural
*args*
**nt_seq**, nucleotide sequence to get the propensities of
**k_init**, initiation rate of starting translation
**k_elong_mean**, average rate of elongation for the protein translation
*keyword args*
**codon_types**, a dictionary or identifier determining which amino acids are slow, fast or natural
self.codon_types is an example dictionary for the user to change / utilize; if codon_types is left blank
get_k_construct uses this internal dictionary
ex: codon_types = 'slow' or 'rare' all amino acids set to slow
codon_types = 'fast' or 'common' all amino acids set to fast
codon_types = 'natural' all amino acids set to natural
codon_types = {'A': 0, 'T': 2} A set to slow, T set to fast
codon_types = {'rare':['A','R'],'common':['L']} A and R set to slow, L set to fast
'''
if codon_types is None:
codon_types = self.codon_types
else:
all_natural = dict(zip(self.aa_keys, np.ones((1, 20)).flatten().astype(int).tolist()))
if isinstance(codon_types, str):
if codon_types == 'rare' or codon_types == 'slow':
all_natural = dict(zip(self.aa_keys, np.zeros((1, 20)).flatten().astype(int).tolist()))
if codon_types == 'common' or codon_types == 'fast':
all_natural = dict(zip(self.aa_keys, (2*np.ones((1, 20))).flatten().astype(int).tolist()))
if isinstance(codon_types, dict):
for key in codon_types.keys():
if isinstance(key, str):
if key.lower() not in ['rare', 'common', 'natural']:
if key.upper() in self.aa_keys:
if codon_types[key] in [0, 1, 2]:
all_natural[key.upper()] = codon_types[key]
if codon_types[key] in ['rare', 'common', 'natural']:
if codon_types[key] == 'rare':
all_natural[key] = 0
if codon_types[key] == 'common':
all_natural[key] = 2
if codon_types[key] == 'natural':
all_natural[key] = 1
else:
newkeys = codon_types[key]
for newkey in newkeys:
if newkey.upper() in self.aa_keys:
if key.lower() == 'rare':
all_natural[newkey.upper()] = 0
if key.lower() == 'common':
all_natural[newkey.upper()] = 2
if key.lower() == 'natural':
all_natural[newkey.upper()] = 1
if isinstance(key, int):
newkeys = codon_types[key]
for newkey in newkeys:
all_natural[newkey] = key
codon_types = all_natural
aa_seq = self.nt2aa(nt_seq)
tRNA_design = np.zeros((1, len(aa_seq)))
tRNA_norm = np.zeros((1, len(aa_seq)))
seperated_codons = [nt_seq[i:i+3] for i in range(0, len(nt_seq), 3)] #split codons by 3
for i in range(len(seperated_codons)):
tRNA_norm[0, i] = self.strGeneCopy[seperated_codons[i]]
for i in range(len(self.aa_keys)-1):
fs = codon_types[self.aa_keys[i]]
indexes = [m.start() for m in re.finditer(self.aa_keys[i], aa_seq)]
for index in indexes:
if fs == 0:
tRNA_design[0, index] = self.slow_codons_value[i]
if fs == 2:
tRNA_design[0, index] = self.fast_codons_value[i]
if fs == 1:
tRNA_design[0, index] = tRNA_norm[0, index]
tRNA_design[0, -1] = tRNA_norm[0, -1]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation_design = (tRNA_design / mean_tRNA_copynumber) * k_elong_mean
all_k_design = [k_init] + k_elongation_design.flatten().tolist() + [k_elong_mean]
return all_k_design
def get_ui(self, nt_seq):
'''
return the ratio of average gene copy number / sequence codon copy number
'''
mean_u = np.mean(list(self.strGeneCopy.values()))
ui = []
for i in range(0, len(nt_seq), 3):
ui.append(mean_u/ self.strGeneCopy[nt_seq[i:i+3]])
return ui
def get_k_3_frame(self,nt_seq,k_elong_mean):
kelongs = []
for n in range(3):
if n !=0:
codons = nt_seq[n:-(3-n)]
else:
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
kelongs = kelongs + k_elongation.flatten().tolist()[:-1]
return kelongs
def get_k(self, nt_seq, k_init, k_elong_mean):
'''
returns all propensities for a given nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
**k_init**, initiation rate of ribosome binding
**k_elong_mean**, average rate of elongation found experimentally
'''
codons = nt_seq
genelength = int(len(codons)/3)
seperated_codons = [codons[i:i+3] for i in range(0, len(codons), 3)] #split codons by 3
k_elongation = np.zeros((1, genelength))
tRNA_copynumber = np.zeros((1, genelength))
for i in range(len(seperated_codons)):
tRNA_copynumber[0, i] = self.strGeneCopy[seperated_codons[i]]
mean_tRNA_copynumber = np.mean(list(self.strGeneCopy.values()))
k_elongation = (tRNA_copynumber / mean_tRNA_copynumber) * k_elong_mean
all_k = [k_init] + k_elongation.flatten().tolist()[:-1] + [10]  # final entry is a fixed termination rate
return all_k
def get_temporal_proteins(self):
'''
gets all the temporal proteins after getting the ORFs
__.tagged_proteins = dictionary with keys of tag types and a list of proteins
__.pois = list of proteins of interest
__.pois_seq = list of nucleotide sequences of the proteins of interest
__.proteins = dictionary with keys of the 1, 2 or 3 reading frames
'''
self.proteins = {'1':[], '2':[], '3':[]}
self.tagged_proteins = {a:[] for a in self.tag_dict.keys()}
self.tagged_protein_seq = {a:[] for a in self.tag_dict.keys()}
for i in range(len(self.orfs)):
for j in range(len(self.orfs[str(i+1)])):
pro = self.nt2aa(self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3])
nt_seq = self.sequence_str[self.orfs[str(i+1)][j][0]:self.orfs[str(i+1)][j][1]+3]
self.proteins[str(i+1)].append(pro)
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in pro:
self.tagged_protein_seq[tag].append(nt_seq)
self.tagged_proteins[tag].append(pro)
tags = 0
for key in self.tagged_proteins.keys():
tags += len(self.tagged_proteins[key])
self.pois = []
self.pois_seq = []
for tag in self.tag_dict.keys():
for i in range(len(self.tagged_proteins[tag])):
if self.tagged_proteins[tag][i] not in self.pois:
self.pois.append(self.tagged_proteins[tag][i])
self.pois_seq.append(self.tagged_protein_seq[tag][i])
if len(self.pois) == 0:
POIs = []
pois_s = []
pois_nt = []
for i in range(len(self.gb_obj.features)):
try:
self.gb_obj.features[i].qualifiers['translation']
if tags == 0:
POIs.append(self.gb_obj.features[i])
pois_s.append(self.nt2aa(self.tag_full['T_Flag']) + self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.append(self.tag_full['T_Flag'] + str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
else:
POIs.append(self.gb_obj.features[i])
pois_s.append(self.gb_obj.features[i].qualifiers['translation'][0])
pois_nt.append(str(self.gb_obj.seq)[int(self.gb_obj.features[i].location.start):int(self.gb_obj.features[i].location.end)])
except KeyError:
pass  # feature has no 'translation' qualifier
self.pois = pois_s
self.pois_seq = pois_nt
def analyze_poi(self, protein, sequence, epitope_loc = 'front'):
'''
Analyzes the protein of interest and stores it in __.POI
*args*
**protein**, amino acid sequence as a string
**sequence**, nucleotide sequence that goes with the protein
**epitope_loc**, consider the epitope location as the front, middle or back:
DDYDDK: front: 0, middle: 3, back: 6 for epitope location
'''
self.POI = poi()
self.POI.nt_seq = sequence
self.POI.aa_seq = protein
self.POI.name = self.sequence_name
self.POI.total_length = len(protein)
'''
for key in self.tagged_proteins:
if protein in self.tagged_proteins[key]:
self.POI.tag_types.append(key)
'''
self.POI.tag_types = []
for tag in self.tag_dict.keys():
if self.tag_dict[tag] in protein:
self.POI.tag_types.append(tag)
#''.join(sms.poi[0].split('DYKDDDDK')
self.POI.tag_epitopes = {a:[] for a in self.POI.tag_types}
gs = protein
for i in range(len(self.POI.tag_types)):
try:
nt_tag = self.tag_full[self.POI.tag_types[i]]
aa_tag = self.nt2aa(nt_tag)
except KeyError:
epi = self.tag_dict[self.POI.tag_types[i]]
firstep = self.POI.aa_seq.find(epi)
lastep = len(self.POI.aa_seq) - self.POI.aa_seq[::-1].find(epi[::-1])
aa_tag = self.POI.aa_seq[firstep:lastep]
nt_tag = self.POI.nt_seq[3*firstep:3*lastep]
if epitope_loc == 'front':
offset = 0
if epitope_loc == 'middle':
offset = int(len(self.tag_dict[self.POI.tag_types[i]])/2)
if epitope_loc == 'back':
offset = len(self.tag_dict[self.POI.tag_types[i]])
self.POI.tag_epitopes[self.POI.tag_types[i]] = [m.start()+1+offset for m in re.finditer(self.tag_dict[self.POI.tag_types[i]], self.POI.aa_seq)]
gs = gs.replace(aa_tag, '')
self.POI.gene_seq = gs
self.POI.gene_length = len(gs)
codons = []
for i in range(0, len(sequence), 3):
codons.append(sequence[i:i+3])
self.POI.codons = codons
self.POI.codon_sensitivity, self.POI.CAI, self.POI.CAI_codons = self.codon_usage(self.POI.nt_seq)
def open_seq_file(self, seqfile):
'''
Reads a sequence file in .txt, .gb/.gbk GenBank, or .dna SnapGene format
*args*
**seqfile**, sequence file in txt, gb, gbk or dna format
'''
seq = seqfile
self.sequence_name = ''
if '.dna' in seq:
self.sequence_name = seq[:-4]
try:
seq_record = snapgene_file_to_seqrecord(seq)
except Exception:
print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader')
return
self.sequence_str = seq_record.seq.tostring()
if '.txt' in seq:
with open(seq) as f:
raw = f.readlines()
raw = ''.join(raw)
onlychar = re.split(r'[^A-Za-z]', raw)
validt = ['A', 'G', 'T', 'C']
validu = ['A', 'G', 'U', 'C']
namelen = 0
self.sequence_str = ''
for i in range(len(onlychar)):
section = onlychar[i]
if set(section.upper()) == set(validt):
self.sequence_str += section.upper()
elif set(section.upper()) == set(validu):
self.sequence_str += section.upper()
else:
if len(section)>namelen:
self.sequence_name = section
namelen = len(section)
if '.gb' in seq:
gb_record = SeqIO.read(open(seq, "r"), "genbank")
self.sequence_str = str(gb_record.seq)
self.sequence_name = gb_record.name
self.gb_obj = gb_record
if self.sequence_name == '':
self.sequence_name = seqfile.replace('.txt', '').replace('.gb', '')
def codon_usage(self, nt_seq):
'''
Analyzes codon useage from the nucleotide sequence
*args*
**nt_seq**, nucleotide sequence as a string
*returns*
**codon_sensitivity**, a list of codon sensitivity for the nucleotide sequence
**cai**, cai value
'''
codon_usage = np.zeros((1, 21))
gene_len = len(nt_seq)/3
aa_seq = self.nt2aa(nt_seq)
for i in range(len(self.aa_keys)-1):
codon_usage[0, i] = len(re.findall(self.aa_keys[i], aa_seq))
codon_usage[0, 20] = len(re.findall(r'\*', aa_seq))
codon_norm = codon_usage/gene_len
codon_sensitivity = np.round(codon_norm*self.sensitivity_fast_slow, 2)
cai_codons = []
for i in range(0, len(nt_seq), 3):
cai_codons.append(self.strGeneCopy[nt_seq[i:i+3]] / self.strGeneCopy_fast[nt_seq[i:i+3]])
cai = self.geomean(cai_codons)
return codon_sensitivity, cai, cai_codons
def get_probvec(self):
'''
returns the probe vectors (epitope positions by codon position) associated with the tagged sequence stored in POI
*returns*
**probe_vec**, cumulative probe intensity vector by codon position. Ex: [0,0,0,0,1,1,1,1,2,2,2,3,3,3 etc]
**probe_loc**, epitope position as a binary vector, 1 for epitope pos, 0 for everything else
'''
probePositions = []
keylist = list(self.POI.tag_epitopes.keys())
for n in range(len(keylist)):
probePosition = []
key = keylist[n]
probePosition = probePosition + self.POI.tag_epitopes[key]
if probePosition != []:
probePosition = np.unique(probePosition).tolist()
probePositions.append(probePosition)
genelength = self.POI.total_length
pvfull = np.zeros((1, genelength+1)).astype(int).flatten()
if len(probePositions) > 1:
k = 0
for n in range(len(keylist)):
pv = np.zeros((1, genelength+1)).astype(int).flatten()
key = keylist[n]
probePosition = probePositions[k]
k+=1
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull = np.vstack((pvfull,pv))
else:
pvfull = pv
else:
probePosition = probePositions[0]
for n in range(len(keylist)):
pv = np.zeros((1, genelength+1)).astype(int).flatten()
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
for i in range(len(probePosition)):
pv[probePosition[i]:] = i+1
if n > 0:
pvfull = np.vstack((pvfull,pv))
else:
pvfull = pv
numtags = 0
for key in keylist:
if len(self.POI.tag_epitopes[key]) != 0:
numtags += 1
ploc = np.zeros((numtags, self.POI.total_length+1)).astype(int)
numind = 0
for n in range(len(keylist)):
key = keylist[n]
if len(self.POI.tag_epitopes[key]) != 0:
ploc[numind][self.POI.tag_epitopes[key]] = 1
numind += 1
return pvfull, ploc
def simple_model(self, poi, tag, ki,ke):
'''
Simplified model
returns the analytical tau, intensity mean, and intensity variance
calculated from the simplified model
'''
L = poi.total_length #get the total length of the gene
Lm = np.mean(poi.tag_epitopes[tag]) #the mean location of the tag epitopes
L_tag = int((poi.tag_epitopes[tag][-1] - poi.tag_epitopes[tag][0]) / 2)
ke_analytical = L*ke / np.sum(self.get_ui(poi.nt_seq[:-3]))
tau_analytical = L_tag/ke_analytical #analytical tau ie autocovariance time
mean_analytical = ki*tau_analytical* (1.-Lm/float(L)) # mean intensity
var_analytical = ki*tau_analytical* (1.-Lm/float(L))**2 #var intensity
return tau_analytical,mean_analytical,var_analytical
def get_binned_k_emphasize_probes(self,k,bins,pl):
'''
evenly bins elongation rates as best it can.
'''
probe_region_start = np.where(pl > 0)[0][0]
probe_region_end = np.where(pl > 0)[0][-1]
binsize = int(np.floor(len(k)/bins))
binned_ks = []
k_binned = np.zeros(bins)
k_lens = np.ones(bins)*binsize
to_redistribute = len(k)%bins
if to_redistribute:  # guard: k_lens[-0:] would overwrite every bin
k_lens[-to_redistribute:] = binsize+1
inds = np.hstack(([0.], np.cumsum(k_lens))).astype(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = np.mean(binned_ks[i])/len(binned_ks[i])
return k_binned,k_lens
def get_binned_k(self,k,bins):
'''
evenly bins elongation rates as best it can.
'''
binsize = int(np.floor(len(k)/bins))
binned_ks = []
k_binned = np.zeros(bins)
k_lens = np.ones(bins)*binsize
to_redistribute = len(k)%bins
if to_redistribute:  # guard: k_lens[-0:] would overwrite every bin
k_lens[-to_redistribute:] = binsize+1
inds = np.hstack(([0.], np.cumsum(k_lens))).astype(int)
for i in range(0,bins):
binned_ks = binned_ks + [k[inds[i]:inds[i+1]].tolist(),]
for i in range(0,bins):
k_binned[i] = 1/np.mean(1/np.array(binned_ks[i]))
return k_binned,k_lens
def get_binned_probe_vec(self,probe_loc,bins):
'''
bin the probe vector as evenly as possible
'''
probe_loc = np.atleast_2d(probe_loc)
binsize = int(np.floor(probe_loc.shape[1]/bins))
probeloc_binned = np.zeros(( | np.atleast_2d(probe_loc) | numpy.atleast_2d |
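# A minimal standalone sketch of the codon-to-rate mapping used by get_k above:
# each codon's elongation rate is its gene copy number scaled by the table mean
# times the target average rate. toy_copy_numbers reuses a few values from
# strGeneCopy purely for illustration; the helper names here are assumptions,
# not part of the class.
import numpy as np

toy_copy_numbers = {'ATG': 22.0, 'GCT': 18.4, 'AAA': 24.4, 'TAA': 1.6}

def elongation_rates(nt_seq, k_elong_mean, copy_table):
    codons = [nt_seq[i:i + 3] for i in range(0, len(nt_seq), 3)]
    u = np.array([copy_table[c] for c in codons], dtype=float)
    return (u / np.mean(list(copy_table.values()))) * k_elong_mean

print(elongation_rates('ATGGCTAAATAA', 10.0, toy_copy_numbers))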
"""Script to measure latency of RegNet.
Usage:
python time_regnet.py \
--model_name=<model_name> \
--crop=<crop> \
--precision=<precision> \
--eval_batch_size=<eval_batch_size> \
--warmup_steps=<warmup_steps> \
--eval_steps=<eval_steps> \
--use_tpu=<use_tpu>
Model names:
RegNet:
regnety800mf, regnety4.0gf, regnety8.0gf
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import numpy as np
import time
import regnet_model
tf.disable_eager_execution()
FLAGS = flags.FLAGS
flags.DEFINE_string(
'model_name',
default='regnety800mf',
help=('Choose from: regnety800mf, regnety4.0gf, regnety8.0gf'))
flags.DEFINE_integer(
'crop',
default=224,
help=('Crop size for ImageNet input.'))
flags.DEFINE_string(
'precision',
default='float16',
help=('Either float16 or float32.'))
flags.DEFINE_integer(
'eval_batch_size',
default=64,
help=('Batch size for evaluation.'))
flags.DEFINE_integer(
'warmup_steps',
default=10,
help=('How many steps to run for warmup.'))
flags.DEFINE_integer(
'eval_steps',
default=100,
help=('How many steps to run for evaluation.'))
flags.DEFINE_boolean(
'use_tpu',
default=False,
help=('Whether or not to run on TPU (affects BatchNormalization layer).'))
def get_model(model_name, input_shape, use_tpu):
# Supplies stem width, slope (w_a), initial width (w_0), quantization (w_m), depth (d), squeeze-excitation ratio, num classes
stem_w = 32 # keeping stem width the same throughout all models
se_r = 0.25
nc = 1000
regnet_params = {
'regnety800mf':{
'stem_w': stem_w,
'w_a': 38.84,
'w_0': 56,
'w_m': 2.4,
'd': 14,
'se_r': se_r,
'nc': nc,
},
'regnety4.0gf':{
'stem_w': stem_w,
'w_a': 31.41,
'w_0': 96,
'w_m': 2.24,
'd': 22,
'se_r': se_r,
'nc': nc,
},
'regnety8.0gf':{
'stem_w': stem_w,
'w_a': 76.82,
'w_0': 192,
'w_m': 2.19,
'd': 17,
'se_r': se_r,
'nc': nc,
}
}
if model_name in regnet_params:
kwargs = regnet_params[model_name]
return regnet_model.RegNet(**kwargs, input_shape=input_shape, use_tpu=use_tpu)
else:
raise ValueError('Unrecognized model name {}'.format(model_name))
def main(unused_argv):
input_shape = (FLAGS.crop, FLAGS.crop, 3)
datatype = np.float16 if FLAGS.precision == 'float16' else np.float32
tf.keras.backend.set_floatx(FLAGS.precision)
# Create fake tensor.
data = np.random.rand(FLAGS.eval_batch_size, input_shape[0], input_shape[1], 3).astype(datatype)
data = tf.convert_to_tensor(data, dtype=datatype)
model = get_model(FLAGS.model_name, input_shape, FLAGS.use_tpu)
outputs = model(data)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# Warmup.
for _ in range(FLAGS.warmup_steps):
sess.run([outputs])
# Time forward pass latency.
timev = []
for _ in range(FLAGS.eval_steps):
startt = time.time()
sess.run([outputs])
endt = time.time()
timev.append(endt - startt)
logging.info('Model: {} (eval_batch_size={}, crop={}, precision={})\nruns: mean={}, min={}, max={}'.format(
FLAGS.model_name, FLAGS.eval_batch_size, FLAGS.crop, FLAGS.precision, np.mean(timev), np.min(timev), np.max(timev)))
logging.info('Step time (ms): {}'.format(
| np.mean(timev) | numpy.mean |
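# A framework-agnostic sketch of the warmup-then-measure pattern used in main();
# step_fn stands in for sess.run([outputs]) and is an assumption here.
import time
import numpy as np

def benchmark(step_fn, warmup_steps=10, eval_steps=100):
    for _ in range(warmup_steps):  # discard one-time graph/compile overhead
        step_fn()
    times = []
    for _ in range(eval_steps):
        start = time.time()
        step_fn()
        times.append(time.time() - start)
    return np.mean(times), np.min(times), np.max(times)

print(benchmark(lambda: sum(range(10000))))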
# pylint:disable=missing-module-docstring
import gym
import numpy as np
import torch
from torch import Tensor
DEFAULT_CONFIG = {
"start": [0.0, 1.0],
"end": [8.0, 9.0],
"action_lower_bound": [-1.0, -1.0],
"action_upper_bound": [1.0, 1.0],
"deceleration_zones": {"center": [[0.0, 0.0]], "decay": [2.0]},
"noise": {"loc": [0.0, 0.0], "scale_tril": [[0.3, 0.0], [0.0, 0.3]]},
"horizon": 20,
"init_dist": True,
}
class NavigationEnv(gym.Env):
"""NavigationEnv implements a gym environment for the Navigation
domain.
The agent must navigate from a start position to an end position.
Its actions represent displacements in the 2D plane. Gaussian noise
is added to the final position to incorporate uncertainty in the
transition. Additionally, the effect of an action might be decreased
by a scalar factor dependent on the proximity of deceleration zones.
Please refer to the AAAI paper for further details:
<NAME>., <NAME>., <NAME>. and <NAME>., 2019, July.
Deep Reactive Policies for Planning in Stochastic Nonlinear Domains.
In Proceedings of the AAAI Conference on Artificial Intelligence.
"""
# pylint:disable=too-many-instance-attributes
metadata = {"render.modes": ["human"]}
def __init__(self, config=None):
self._config = {**DEFAULT_CONFIG, **(config or {})}
self._start = np.array(self._config["start"], dtype=np.float32)
self._end = | np.array(self._config["end"], dtype=np.float32) | numpy.array |
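# A sketch of the stochastic transition implied by DEFAULT_CONFIG: the next
# position is the current position plus the action plus correlated Gaussian
# noise parameterized by loc and scale_tril (a Cholesky factor). The function
# name noisy_step is illustrative, not part of the environment's API.
import numpy as np

loc = np.array([0.0, 0.0])
scale_tril = np.array([[0.3, 0.0], [0.0, 0.3]])

def noisy_step(position, action, rng=np.random):
    noise = loc + scale_tril @ rng.standard_normal(2)
    return position + action + noise

print(noisy_step(np.array([0.0, 1.0]), np.array([1.0, 1.0])))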
#!/usr/bin/env python3
"""
CLI to process multiple molecules with shared optimization.
"""
import os
from deeperwin.available_gpus import get_free_GPU_id
os.environ['CUDA_VISIBLE_DEVICES'] = get_free_GPU_id()
import argparse
import copy
import logging
import os
import time
from dataclasses import dataclass, field
from typing import Tuple
import jax.numpy as jnp
import numpy as np
from jax.config import config as jax_config
from jax.lib import xla_bridge
from deeperwin.configuration import Configuration, SharedOptimizationConfig, OptimizationConfig, LoggingConfig
from deeperwin.dispatch import idx_to_job_name, setup_job_dir, prepare_checkpoints
from deeperwin.evaluation import evaluate_wavefunction, build_evaluation_step
from deeperwin.kfac import build_grad_loss_kfac
from deeperwin.loggers import LoggerCollection, build_dpe_root_logger
from deeperwin.mcmc import MCMCState, MetropolisHastingsMonteCarlo, resize_nr_of_walkers
from deeperwin.model import build_log_psi_squared
from deeperwin.optimization import build_grad_loss, build_optimizer
from deeperwin.utils import getCodeVersion, prepare_data_for_logging, get_number_of_params, merge_trainable_params, split_trainable_params, \
calculate_metrics
logger = logging.getLogger("dpe")
@dataclass
class WaveFunctionData:
# note: fields need annotations, otherwise @dataclass silently treats them as class attributes
physical: object = None
fixed_params: object = None
unique_trainable_params: object = None
mcmc_state: object = None
clipping_params: Tuple[float, float] = (jnp.array([0.0]).squeeze(), jnp.array([1000.0]).squeeze())
checkpoints: dict = field(default_factory=dict)
loggers: object = None
current_metrics: dict = field(default_factory=dict)
n_opt_epochs: int = 0
last_epoch_optimized: int = 0
def init_wfs(config: Configuration):
wfs = []
mcmc = MetropolisHastingsMonteCarlo(config.mcmc)
physical_configs = config.physical.set_from_changes()
for i, p in enumerate(physical_configs):
logger.info(f"Init wavefunction {i}...")
# init WF object
wf = WaveFunctionData()
wf.physical = p
# init parameters
new_log_psi_squared, new_trainable_params, wf.fixed_params = build_log_psi_squared(config.model, p)
new_shared_params, wf.unique_trainable_params = split_trainable_params(new_trainable_params,
config.optimization.shared_optimization.shared_modules)
# in case of first WF, set shared_params and log_psi_squared for all WFs
if i == 0:
shared_params = new_shared_params
log_psi_squared = new_log_psi_squared
# initialize and warm up MCMC state of WF
logger.info(f"Starting warm-up for wf {i}...")
wf.mcmc_state = MCMCState.initialize_around_nuclei(config.mcmc.n_walkers_opt, p)
wf.mcmc_state.log_psi_sqr = log_psi_squared(*wf.mcmc_state.model_args,
new_trainable_params,
wf.fixed_params)
wf.mcmc_state = mcmc.run_burn_in_opt(log_psi_squared, (new_trainable_params, wf.fixed_params), wf.mcmc_state)
# make folder for single WF (stores adjusted config and logger data)
job_name = idx_to_job_name(i)
job_dir = setup_job_dir(".", job_name)
# init loggers
loggers = LoggerCollection(config.logging, config.experiment_name + "_" + job_name, save_path=job_name,
prefix=job_name)
loggers.on_run_begin()
loggers.log_tags(config.logging.tags)
loggers.log_metrics(dict(E_hf=wf.fixed_params["E_hf"], E_casscf=wf.fixed_params["E_casscf"]))
loggers.log_param("n_params", get_number_of_params(new_trainable_params))
loggers.log_param("n_params_shared", get_number_of_params(shared_params))
loggers.log_param("n_params_unique", get_number_of_params(wf.unique_trainable_params))
wf.loggers = loggers
# save full config for single wavefunction
config_wf = copy.deepcopy(config)
config_wf.physical = p
config_wf.optimization.shared_optimization = None
config_wf.save(os.path.join(job_dir, "full_config.yml"))
# prepare checkpoints
wf.checkpoints = prepare_checkpoints(job_dir, config.optimization.checkpoints, config_wf) if len(
config.optimization.checkpoints) > 0 else {}
wfs.append(wf)
# build optimizer
if config.optimization.optimizer.name == 'kfac':
grad_loss_func = build_grad_loss_kfac(log_psi_squared, config.optimization.clipping)
else:
grad_loss_func = build_grad_loss(log_psi_squared, config.optimization.clipping)
trainable_params = merge_trainable_params(shared_params, wfs[0].unique_trainable_params)
opt_get_params, optimize_epoch, opt_state, opt_set_params = build_optimizer(log_psi_squared, grad_loss_func,
mcmc, trainable_params,
wfs[0].fixed_params,
config.optimization,
config.mcmc.n_walkers_opt,
mcmc_state=wfs[0].mcmc_state)
return log_psi_squared, mcmc, wfs, shared_params, optimize_epoch, opt_state, opt_get_params, opt_set_params
def update_opt_state(opt_state_old, get_params_func, opt_set_params, unique_trainable_params, shared_modules):
shared_params, _ = split_trainable_params(get_params_func(opt_state_old), shared_modules)
new_params = merge_trainable_params(shared_params, unique_trainable_params)
return opt_set_params(opt_state_old, new_params)
def get_index(n_epoch, wfs, config: SharedOptimizationConfig):
method = config.scheduling_method
if method == "round_robin":
return n_epoch % len(wfs)
elif method == 'stddev':
wf_ages = n_epoch - jnp.array([wf.last_epoch_optimized for wf in wfs])
if n_epoch < len(wfs)*10:
index = n_epoch % len(wfs)
elif jnp.any(wf_ages > config.max_age):
index = jnp.argmax(wf_ages)
else:
stddevs = [wf.current_metrics['E_std'] for wf in wfs]
index = | np.argmax(stddevs) | numpy.argmax |
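# A standalone sketch of the scheduling rule in get_index above: optimize in
# round-robin order for the first 10 passes per wavefunction, then pick the
# wavefunction with the largest energy standard deviation unless one has gone
# unoptimized for more than max_age epochs. Names here are assumptions.
import numpy as np

def pick_index(n_epoch, ages, stddevs, max_age=50):
    n_wfs = len(ages)
    if n_epoch < n_wfs * 10:
        return n_epoch % n_wfs
    if np.any(np.asarray(ages) > max_age):
        return int(np.argmax(ages))
    return int(np.argmax(stddevs))

print(pick_index(100, ages=[2, 60, 1], stddevs=[0.3, 0.1, 0.5]))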
import os
import numpy as np
import cv2
from collections import defaultdict
import hashlib
import glob
import time
import configparser
import pickle
import matplotlib.pyplot as plt
from sixd_toolkit.pysixd import transform, pose_error, inout
from sixd_toolkit.params import dataset_params
from auto_pose.ae.pysixd_stuff import view_sampler
from auto_pose.eval import eval_plots, eval_utils
from auto_pose.ae import utils as u
def compute_plot_emb_invariance(args_latent, codebook):
encoder = codebook._encoder
dataset = codebook._dataset
Rs, lon_lat, pts = eval_plots.generate_view_points(noof=101)
syn_crops = []
z_train = np.zeros((len(Rs), encoder.latent_space_size))
for R in Rs:
syn_crops.append(dataset.render_rot(R, obj_id=1)/255.)
for a, e in u.batch_iteration_indices(len(Rs), 200):
print(a)
z_train[a:e] = sess.run(encoder.z, feed_dict={
encoder._input: syn_crops[a:e]})
aug = eval(args_latent.get('Emb_invariance', 'aug'))
batch = []
orig_img = (syn_crops[100]*255).astype(np.uint8) # H, W, C, C H W
for i in range(200):
print(i)
img = aug.augment_image(orig_img.copy()).astype(np.float32) / 255.
#img = img.transpose( (1, 2, 0) ) #C H, W 1, 2,
batch.append(img)
batch = np.array(batch)
z_test = sess.run(encoder.z, feed_dict={encoder._input: batch})
eval_plots.compute_pca_plot_embedding(
'', z_train, z_test=z_test, lon_lat=None, save=False, inter_factor=1)
from gl_utils import tiles
import cv2
mean_var = np.mean(np.var(z_test, axis=0))
cv2.imshow('mean_var: %s' % mean_var, tiles(batch, 10, 20))
cv2.waitKey(0)
plt.show()
def plot_latent_revolutions(num_obj, codebook):
encoder = codebook._encoder
dataset = codebook._dataset
# generate PCA directions from all objects
Rs, lon_lat, _ = eval_plots.generate_view_points(noof=201, num_cyclo=5)
all_ztrain = []
for i in range(0, num_obj*2, 4):
syn_crops = []
z_train = np.zeros((len(Rs), encoder.latent_space_size))
for R in Rs:
syn_crops.append(dataset.render_rot(R, obj_id=i)/255.)
for a, e in u.batch_iteration_indices(len(Rs), 200):
print(e)
z_train[a:e] = sess.run(encoder.z, feed_dict={
encoder._input: syn_crops[a:e]})
all_ztrain.append(z_train)
all_ztrain = np.array(all_ztrain).reshape(-1, 128)
pca_all = eval_plots.compute_pca_plot_embedding('', all_ztrain, lon_lat=list(lon_lat)*5, save=False)
Rs, lon_lat, _ = eval_plots.generate_azim_elev_points(noof=36*8)
fig = plt.figure(figsize=(3*num_obj, 3*4))
fig.subplots_adjust(top=0.95, bottom=0.05)
# plt.title('Embedding Principal Components')
imgs = []
axes = []
for o in range(0, num_obj*4):
syn_crops = []
for R in Rs:
if o >= 2*num_obj and o < 3*num_obj:
R_rot = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
syn_crops.append(dataset.render_rot(
np.dot(R, R_rot), obj_id=o)/255.)
else:
syn_crops.append(dataset.render_rot(R, obj_id=o)/255.)
syn_crops = np.array(syn_crops)
imgs.append(syn_crops[np.linspace(
0, len(syn_crops), 8, endpoint=False).astype(np.int32)])
# im = u.tiles(np.array(syn_crops),12,18*4,scale=0.66)
z_train = np.zeros((len(Rs), encoder.latent_space_size))
# cv2.imshow('',im)
# cv2.waitKey(1)
for a, e in u.batch_iteration_indices(len(Rs), 200):
print(e)
z_train[a:e] = sess.run(encoder.z, feed_dict={
encoder._input: syn_crops[a:e]})
# eval_plots.compute_pca_plot_embedding('',z_train,lon_lat=lon_lat,save=False)
ax = fig.add_subplot(4, num_obj, o+1, projection='3d')
# if o>=3*num_obj:
# pca_all=None
eval_plots.compute_pca_plot_azelin(
36*8+1, z_train, pca=pca_all, save=False, inter_factor=1, normalize=False, fig=fig, ax=ax)
axes.append(ax)
axes[-1].legend()
# for j in range(len(Rs)):
# Rs_est = codebook.nearest_rotation(sess, syn_crops[j], top_n=1)
# est_view = dataset.render_rot(Rs_est.squeeze(),obj_id=0)/255.
# cv2.imshow('inserted_view',syn_crops[j])
# cv2.imshow('est_view',est_view)
# cv2.waitKey(0)
def on_move(event):
ax_i = axes.index(event.inaxes)
for ax_ in axes:
# if ax_ is not axes[ax_i]:
ax_.view_init(elev=axes[ax_i].elev, azim=axes[ax_i].azim)
ax_.set_xlim3d(axes[ax_i].get_xlim3d())
ax_.set_ylim3d(axes[ax_i].get_ylim3d())
ax_.set_zlim3d(axes[ax_i].get_zlim3d())
fig.canvas.draw_idle()
c1 = fig.canvas.mpl_connect('motion_notify_event', on_move)
im = u.tiles(np.array(imgs).reshape(-1, 128, 128, 3), num_obj*4, 8, scale=1)
cv2.imshow('', im)
cv2.waitKey(1)
plt.show()
def relative_pose_refinement(sess, args_latent, dataset, codebook):
budget = args_latent.getint('Refinement', 'budget_per_epoch')
epochs = args_latent.getint('Refinement', 'epochs')
sampling_interval_deg = args_latent.getint('Refinement', 'sampling_interval_deg')
top_n_refine = args_latent.getint('Refinement', 'max_num_modalities')
t_z = args_latent.getint('Refinement', 't_z')
num_obj = args_latent.getint('Data', 'num_obj')
num_views = args_latent.getint('Data', 'num_views')
test_class = args_latent.get('Data', 'test_class')
K = eval(dataset._kw['k'])
K = np.array(K).reshape(3,3)
# K[0, 0] = K[0, 0] /2
# K[1, 1] = K[1, 1] /2
render_dims = np.array(eval(dataset._kw['render_dims']))
render_dims[0] = 640
render_dims[1] = 480
K = np.array([[572.4114, 0, 320.], [0, 573.57043, 240], [0, 0, 1]]) # LM
dataset._kw['render_dims'] = '(640,480)'
dataset._kw['k'] = 'np.array([[572.4114, 0, 320.], [0, 573.57043, 240], [0, 0, 1]])'
clip_near = float(dataset._kw['clip_near'])
clip_far = float(dataset._kw['clip_far'])
pad_factor = float(dataset._kw['pad_factor'])
pose_errs = []
pose_errs_refref = []
pose_errs_trans = []
add_errs = []
proj_errs = []
all_model_pts = [np.array(v) for v in dataset.renderer.verts]
diameters = []
for model_pts in all_model_pts:
# model_pts_01 = model_pts * 0.1
vec = model_pts.max(0) - model_pts.min(0)
print(vec)
diameters.append(np.linalg.norm(vec))
res_dict = {'test_class': test_class, 'preds': {}}
for i in range(0, num_obj):
res_dict['preds'][i] = {'R_init': [], 'R_init_pert': [], 'R_1': [], 'R_2': [], 'R_3': [],
't_init': [], 't_init_pert': [], 't_1': [], 't_2': [], 't_3': []}
for j in range(num_views):
random_R = transform.random_rotation_matrix()[:3, :3]
full_target_view, full_target_view_dep= dataset.renderer.render(obj_id=i,
W=render_dims[0],
H=render_dims[1],
K=K.copy(),
R=random_R,
t=np.array([0,0,t_z]),
near=clip_near,
far=clip_far,
random_light=False)
ys, xs = np.nonzero(full_target_view_dep > 0)
target_bb = view_sampler.calc_2d_bbox(xs, ys, render_dims)
target_view = dataset.extract_square_patch(full_target_view, target_bb, pad_factor)
angle_off = 2*np.pi
while abs(angle_off) > 45/180.*np.pi:
# rand_direction = transform.make_rand_vector(3)
# rand_angle = np.random.normal(0, 45/180.*np.pi)
# R_off = transform.rotation_matrix(rand_angle, rand_direction)[:3, :3]
rand_angle_x = np.random.normal(0,15/180.*np.pi)
rand_angle_y = np.random.normal(0,15/180.*np.pi)
rand_angle_z = np.random.normal(0,15/180.*np.pi)
R_off = transform.euler_matrix(rand_angle_x,rand_angle_y,rand_angle_z)
angle_off,_,_ = transform.rotation_from_matrix(R_off)
random_R_pert = np.dot(R_off[:3, :3], random_R)
random_t_pert = np.array([0,0,t_z]) + np.array([np.random.normal(0,10),np.random.normal(0,10),np.random.normal(0,50)])
print(angle_off * 180 / np.pi)
print(random_t_pert)
full_perturbed_view, _ = dataset.renderer.render(obj_id=i,
W=render_dims[0],
H=render_dims[1],
K=K.copy(),
R=random_R_pert,
t=random_t_pert,
near=clip_near,
far=clip_far,
random_light=False
)
init_perturbed_view = dataset.extract_square_patch(full_perturbed_view, target_bb, pad_factor)
start_time = time.time()
R_refined, _ = codebook.refined_nearest_rotation(sess, target_view, 1, R_init=random_R_pert, t_init=random_t_pert,
budget=budget+10, epochs=epochs, high = sampling_interval_deg/180.*np.pi, obj_id=i,
top_n_refine=top_n_refine, target_bb=target_bb)
refine_R_1 = time.time() -start_time
full_perturbed_view_2, _ = dataset.renderer.render(obj_id=i,
W=render_dims[0],
H=render_dims[1],
K=K.copy(),
R=R_refined[0],
t=random_t_pert,
near=clip_near,
far=clip_far,
random_light=False
)
perturbed_view_2 = dataset.extract_square_patch(full_perturbed_view_2, target_bb, pad_factor)
x_target, y_target, real_scale = multi_scale_template_matching(full_perturbed_view_2, full_target_view, args_latent)
t_refined = np.array([random_t_pert[0]-(x_target-K[0, 2])/K[0, 0]*random_t_pert[2]*real_scale,
random_t_pert[1]-(y_target-K[1, 2])/K[1, 1]*random_t_pert[2]*real_scale,
random_t_pert[2]*real_scale])
refine_t_1 = time.time() - start_time
print(x_target, y_target, real_scale)
print(t_refined)
print('error t: ', t_refined - np.array([0,0,t_z]))
R_refined_refined, _ = codebook.refined_nearest_rotation(sess, target_view, 1, R_init=R_refined[0], t_init=t_refined,
budget=budget, epochs=epochs, high=sampling_interval_deg/2./180.*np.pi, obj_id=i,
top_n_refine=top_n_refine, target_bb=target_bb)
refine_R_2 = time.time() - start_time
full_perturbed_view_3, _ = dataset.renderer.render(obj_id=i,
W=render_dims[0],
H=render_dims[1],
K=K.copy(),
R=R_refined_refined[0],
t=t_refined,
near=clip_near,
far=clip_far,
random_light=False
)
x_target, y_target, real_scale = multi_scale_template_matching(full_perturbed_view_3, full_target_view, args_latent)
t_refined_refined = np.array([t_refined[0]-(x_target-K[0, 2])/K[0, 0]*t_refined[2]*real_scale,
t_refined[1]-(y_target-K[1, 2])/K[1, 1]*t_refined[2]*real_scale,
t_refined[2]*real_scale])
refine_t_2 = time.time() - start_time
R_refined_refined_refined, _ = codebook.refined_nearest_rotation(sess, target_view, 1, R_init=R_refined_refined[0], t_init=t_refined_refined,
budget=budget-10, epochs=epochs, high=sampling_interval_deg/3./180.*np.pi, obj_id=i,
top_n_refine=top_n_refine, target_bb=target_bb)
refine_R_3 = time.time() - start_time
full_perturbed_view_4, _ = dataset.renderer.render(obj_id=i,
W=render_dims[0],
H=render_dims[1],
K=K.copy(),
R=R_refined_refined_refined[0],
t=t_refined_refined,
near=clip_near,
far=clip_far,
random_light=False
)
x_target, y_target, real_scale = multi_scale_template_matching(full_perturbed_view_4, full_target_view, args_latent,last=True)
t_refined_refined_refined = np.array([t_refined_refined[0]-(x_target-K[0, 2])/K[0, 0]*t_refined_refined[2]*real_scale,
t_refined_refined[1]-(y_target-K[1, 2])/K[1, 1]*t_refined_refined[2]*real_scale,
t_refined_refined[2]*real_scale])
refine_t_3 = time.time() - start_time
res_dict['preds'][i]['R_init'].append(np.array(random_R))
res_dict['preds'][i]['R_init_pert'].append(random_R_pert)
res_dict['preds'][i]['R_1'].append(R_refined[0])
res_dict['preds'][i]['R_2'].append(R_refined_refined[0])
res_dict['preds'][i]['R_3'].append(R_refined_refined_refined[0])
res_dict['preds'][i]['t_init'].append(np.array([0, 0, t_z]))
res_dict['preds'][i]['t_init_pert'].append(random_t_pert)
res_dict['preds'][i]['t_1'].append(t_refined)
res_dict['preds'][i]['t_2'].append(t_refined_refined)
res_dict['preds'][i]['t_3'].append(t_refined_refined_refined)
# pose_errs_trans.append(pose_error.te(t_refined_refined, np.array([0, 0, t_z])))
# pose_errs.append(pose_error.re(random_R, R_refined[0]))
# pose_errs_refref.append(pose_error.re(random_R, R_refined_refined_refined[0]))
print('add_recall: ', add_recall_diameter(R_refined_refined_refined[0], t_refined_refined_refined, random_R, np.array([
0, 0, t_z]), {'pts': all_model_pts[i]}, diameters[i]))
# proj_err = pose_error.arp_2d(R_refined_refined_refined[0], t_refined_refined, random_R, np.array([0, 0, t_z]), {'pts': all_model_pts[i]}, K)
# print 'add: ', add_err
# print 'proj: ', proj_err
# add_errs.append(add_err)
# proj_errs.append(proj_err)
# # pose_errs[-1] = np.minimum(pose_errs[-1],np.abs(pose_errs[-1]-180))
print('timings:')
print(refine_R_1)
print(refine_t_1)
print(refine_R_2)
print(refine_t_2)
print(refine_R_3)
print(refine_t_3)
print('object: ', i)
if args_latent.getboolean('Visualization', 'verbose'):
Rs = [R_refined, R_refined, R_refined_refined, R_refined_refined, R_refined_refined_refined]
ts = [random_t_pert, t_refined, t_refined, t_refined_refined, t_refined_refined]
est_views = [full_perturbed_view.copy()]
for R,t in zip(Rs,ts):
est_view, _ = dataset.renderer.render(obj_id=i,
W=render_dims[0],
H=render_dims[1],
K=K.copy(),
R=R[0],
t=t,
near=clip_near,
far=clip_far,
random_light=False
)
est_views.append(est_view)
for p, v in enumerate(est_views):
full_target_view_copy = full_target_view.copy()
start_edge = cv2.Canny(cv2.cvtColor(full_perturbed_view, cv2.COLOR_BGR2GRAY), 80, 200, apertureSize=3)
end_edge = cv2.Canny(cv2.cvtColor(v, cv2.COLOR_BGR2GRAY), 80, 200, apertureSize=3)
red_chan = full_target_view_copy[:, :, 2]
green_chan = full_target_view_copy[:,:, 1]
red_chan[start_edge > 0] = start_edge[start_edge>0]
green_chan[(end_edge > 0) & (start_edge == 0)] = end_edge[(end_edge > 0) & (start_edge == 0)]
full_target_view_copy[:,:, 1] = green_chan
full_target_view_copy[:, :, 2] = red_chan
# cv2.imshow('deep_im_vis', full_target_view_copy/255.)
cv2.imwrite('%s_%s_%s_%s.png' % (test_class,i,j,p), full_target_view_copy)
# cv2.waitKey(0)
if p == 0:
full_target_view_copy = full_target_view.copy()
full_target_view_copy[:,:, 1] = red_chan
# cv2.imshow('deep_im_vis', full_target_view_copy/255.)
cv2.imwrite('%s_%s_%s_%s_init.png' % (test_class,i,j,p), full_target_view_copy)
# cv2.waitKey(0)
# full_perturbed_view_3, _ = dataset.renderer.render(obj_id=i,
# W=render_dims[0],
# H=render_dims[1],
# K=K.copy(),
# R=R_refined[0],
# t=t_refined_refined,
# near=clip_near,
# far=clip_far,
# random_light=False
# )
# perturbed_view_3 = dataset.extract_square_patch(full_perturbed_view_3, target_bb, pad_factor)
# est_view_final = dataset.extract_square_patch(full_est_view_final, target_bb, pad_factor)
# cv2.imshow('goal_view', target_view)
# cv2.imshow('pert_view', init_perturbed_view/255.)
# cv2.imshow('est_view_1', perturbed_view_2/255.)
# cv2.imshow('est_view_2', perturbed_view_3/255.)
# cv2.imshow('est_view_3', est_view_final/255.)
return res_dict
def add_recall_diameter(R_est, t_est, R_gt, t_gt, model_pts, diameter):
add_err = pose_error.add(R_est, t_est, R_gt, t_gt, model_pts)
if add_err < diameter * 0.1:
return 1.
else:
return 0.
def proj_recall_diameter(R_est, t_est, R_gt, t_gt, model_pts, diameter, K):
proj_err = pose_error.arp_2d(R_est, t_est, R_gt, t_gt, model_pts, K)
if proj_err <= 5:
return 1.
else:
return 0.
def compute_pose_errors(res_dict, args_latent, dataset):
num_obj = args_latent.getint('Data', 'num_obj')
num_views = args_latent.getint('Data', 'num_views')
test_class = args_latent.get('Data', 'test_class')
K = eval(dataset._kw['k'])
K = np.array(K).reshape(3,3)
K = np.array([[572.4114, 0, 320.], [0, 573.57043, 240], [0, 0, 1]]) # LM
R_init_errs = []
R_1_errs = []
R_2_errs = []
R_3_errs = []
t_init_errs = []
t_1_errs = []
t_2_errs = []
t_3_errs = []
add_recalls_init = []
add_recalls = []
proj_recalls_init = []
proj_recalls = []
proj_recalls2 = []
all_model_pts = [np.array(v) for v in dataset.renderer.verts]
diameters = []
for model_pts in all_model_pts:
# model_pts_01 = model_pts * 0.1
vec = model_pts.max(0) - model_pts.min(0)
print(vec)
diameters.append(np.linalg.norm(vec))
print(diameters)
for i in range(0, num_obj):
for j in range(num_views):
R_target = res_dict['preds'][i]['R_init'][j]
t_target = res_dict['preds'][i]['t_init'][j]
R_init_errs.append(pose_error.re(R_target, res_dict['preds'][i]['R_init_pert'][j]))
R_1_errs.append(pose_error.re(R_target, res_dict['preds'][i]['R_1'][j]))
R_2_errs.append(pose_error.re(R_target, res_dict['preds'][i]['R_2'][j]))
R_3_errs.append(pose_error.re(R_target, res_dict['preds'][i]['R_3'][j]))
t_init_errs.append(pose_error.te(t_target, res_dict['preds'][i]['t_init_pert'][j]))
t_1_errs.append(pose_error.te(t_target, res_dict['preds'][i]['t_1'][j]))
t_2_errs.append(pose_error.te(t_target, res_dict['preds'][i]['t_2'][j]))
t_3_errs.append(pose_error.te(t_target, res_dict['preds'][i]['t_3'][j]))
add_recalls_init.append(add_recall_diameter(res_dict['preds'][i]['R_init_pert'][j],
res_dict['preds'][i]['t_init_pert'][j], R_target,
t_target, {'pts': all_model_pts[i]}, diameters[i]))
add_recalls.append(add_recall_diameter(res_dict['preds'][i]['R_3'][j],
res_dict['preds'][i]['t_3'][j], R_target,
t_target, {'pts': all_model_pts[i]}, diameters[i]))
proj_recalls_init.append(proj_recall_diameter(res_dict['preds'][i]['R_init_pert'][j],
res_dict['preds'][i]['t_init_pert'][j], R_target,
t_target, {'pts': all_model_pts[i]}, diameters[i], K))
proj_recalls.append(proj_recall_diameter(res_dict['preds'][i]['R_3'][j],
res_dict['preds'][i]['t_3'][j], R_target,
t_target, {'pts': all_model_pts[i]}, diameters[i], K))
proj_recalls2.append(proj_recall_diameter(res_dict['preds'][i]['R_3'][j],
res_dict['preds'][i]['t_2'][j], R_target,
t_target, {'pts': all_model_pts[i]}, diameters[i], K))
R_init_errs = np.array(R_init_errs)
R_1_errs = np.array(R_1_errs)
R_2_errs = np.array(R_2_errs)
R_3_errs = np.array(R_3_errs)
t_init_errs = np.array(t_init_errs)
t_1_errs = np.array(t_1_errs)
t_2_errs = np.array(t_2_errs)
t_3_errs = np.array(t_3_errs)
res = {}
# res['R_init_errs'] = np.array(R_init_errs)
# res['R_1_errs'] = np.array(R_1_errs)
# res['R_2_errs'] = np.array(R_2_errs)
# res['R_3_errs'] = np.array(R_3_errs)
# res['t_init_errs'] = np.array(t_init_errs)
# res['t_1_errs'] = np.array(t_1_errs)
# res['t_2_errs'] = np.array(t_2_errs)
res['mean_add_recall_init'] = | np.mean(add_recalls_init) | numpy.mean |
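# A sketch of the ADD recall criterion used in compute_pose_errors: a pose is
# counted correct when the mean distance between model points under the
# estimated and ground-truth poses falls below 10% of the model diameter.
# This is a condensed stand-in for pose_error.add, written under that assumption.
import numpy as np

def add_error(R_est, t_est, R_gt, t_gt, pts):
    est = pts @ R_est.T + t_est
    gt = pts @ R_gt.T + t_gt
    return np.mean(np.linalg.norm(est - gt, axis=1))

def add_recall(R_est, t_est, R_gt, t_gt, pts, diameter):
    return 1.0 if add_error(R_est, t_est, R_gt, t_gt, pts) < 0.1 * diameter else 0.0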
import numpy as _np
from chainer.dataset import DatasetMixin as _DatasetMixin, concat_examples
from sklearn.externals.joblib import Memory as _Memory
from sklearn.datasets import load_svmlight_file as _load_svmlight_file
class RankingDataset(_DatasetMixin):
"""
Chainer version of a ranking dataset
"""
def __init__(self, feature_vectors, relevance_labels, qids, nr_samples=None,
filter=False, normalize=False):
"""
:param feature_vectors: The numpy 2d array of samples ((query, doc),
feature)
:type feature_vectors: numpy.ndarray
:param relevance_labels: The numpy array relevance labels
:type relevance_labels: numpy.ndarray
:param qids: The query identifiers
:type qids: numpy.ndarray
:param nr_samples: The number of samples (if not provided this is
inferred from x)
:type nr_samples: int
:param filter: Whether to filter out queries with no relevant documents
:type filter: bool
:param normalize: Whether to perform query-level normalization of
features
:type normalize: bool
"""
self.feature_vectors = feature_vectors.astype(_np.float32)
self.relevance_labels = relevance_labels.astype(_np.int32)
self.maximum_relevance = _np.max(self.relevance_labels)
self.minimum_relevance = _np.min(self.relevance_labels)
self.qids = qids.astype(_np.int32)
self.unique_qids = _np.unique(qids)
self.nr_dimensions = self.feature_vectors.shape[1]
# Perform filtering if necessary
if filter is True:
new_unique_qids = []
for i in range(len(self.unique_qids)):
ys = self.relevance_labels[self.qids == self.unique_qids[i]]
if _np.sum(ys) > 0.0:
new_unique_qids.append(self.unique_qids[i])
self.unique_qids = | _np.array(new_unique_qids) | numpy.array |
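# A sketch of the per-query grouping behind RankingDataset's filter/normalize
# options: map each qid to the row indices of its documents, then e.g. drop
# queries whose relevance labels sum to zero. group_by_qid is an illustrative
# helper name, not part of the class.
import numpy as np

def group_by_qid(qids):
    return {qid: np.flatnonzero(qids == qid) for qid in np.unique(qids)}

qids = np.array([7, 7, 9, 9, 9])
labels = np.array([0, 1, 0, 0, 0])
kept = [q for q, idx in group_by_qid(qids).items() if labels[idx].sum() > 0]
print(kept)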
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# Copyright (c) 2015, <NAME> and <NAME>.
# License: GNU-GPL Style.
# How to cite GBpy:
# Banadaki, <NAME>. & <NAME>. "An efficient algorithm for computing the primitive bases of a general lattice plane",
# Journal of Applied Crystallography 48, 585-588 (2015). doi:10.1107/S1600576715004446
import numpy as np
import sys
import pickle
import os
from . import quaternion as quat
# -----------------------------------------------------------------------------------------------------------
def check_cond(g, cryst_ptgrp, tol):
"""
Function
Parameters
----------------
g: quaternion object
Misorientation
cryst_ptgrp: str
Crystallographic point group in Schoenflies notation
tol: float
Tolerance for the misorientation to belong in the fundamental zone
Returns
------------
True or False: Boolean
Depending on whether or not the misorientation is a disorientation
"""
q0 = quat.getq0(g)
q1 = quat.getq1(g)
q2 = quat.getq2(g)
q3 = quat.getq3(g)
if cryst_ptgrp == 'D3' or cryst_ptgrp == 'D3d':
cond1 = q3 > -tol
cond2 = q3 - q0/ | np.sqrt(3) | numpy.sqrt |
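# A sketch of the tolerance pattern used in check_cond: boundary inequalities
# such as q3 > 0 are tested against -tol so quaternions lying exactly on a
# fundamental-zone boundary still pass. The helper name is an assumption.
import numpy as np

def all_nonnegative(vals, tol=1e-10):
    return bool(np.all(np.asarray(vals) > -tol))

print(all_nonnegative([0.0, 1e-12, 0.3]))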
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import librosa.display as dsp
import numpy as np
import tensorflow as tf
def _assert_valid_input_type(s):
assert s == 'mulaw-quantize' or s == 'mulaw' or s == 'raw'
def is_mulaw_quantize(s):
_assert_valid_input_type(s)
return s == 'mulaw-quantize'
def is_mulaw(s):
_assert_valid_input_type(s)
return s == 'mulaw'
def is_raw(s):
_assert_valid_input_type(s)
return s == 'raw'
def is_scalar_input(s):
return is_raw(s) or is_mulaw(s)
#From https://github.com/r9y9/nnmnkwii/blob/master/nnmnkwii/preprocessing/generic.py
def mulaw(x, mu=256):
"""Mu-Law companding
Method described in paper [1]_.
.. math::
f(x) = sign(x) ln (1 + mu |x|) / ln (1 + mu)
Args:
x (array-like): Input signal. Each value of input signal must be in
range of [-1, 1].
mu (number): Compression parameter ``μ``.
Returns:
array-like: Compressed signal ([-1, 1])
See also:
:func:`nnmnkwii.preprocessing.inv_mulaw`
:func:`nnmnkwii.preprocessing.mulaw_quantize`
:func:`nnmnkwii.preprocessing.inv_mulaw_quantize`
.. [1] Brokish, <NAME>., and <NAME>. "A-law and mu-law companding
implementations using the tms320c54x." SPRA163 (1997).
"""
mu = 255  # note: the mu argument above is ignored; mu is fixed at 255 in these helpers
return _sign(x) * _log1p(mu * _abs(x)) / _log1p(mu)
def inv_mulaw(y, mu=256):
"""Inverse of mu-law companding (mu-law expansion)
.. math::
f^{-1}(y) = sign(y) (1 / mu) ((1 + mu)^{|y|} - 1)
Args:
y (array-like): Compressed signal. Each value of input signal must be in
range of [-1, 1].
mu (number): Compression parameter ``μ``.
Returns:
array-like: Uncompressed signal (-1 <= x <= 1)
See also:
:func:`nnmnkwii.preprocessing.inv_mulaw`
:func:`nnmnkwii.preprocessing.mulaw_quantize`
:func:`nnmnkwii.preprocessing.inv_mulaw_quantize`
"""
mu = 255
return _sign(y) * (1.0 / mu) * ((1.0 + mu)**_abs(y) - 1.0)
def mulaw_quantize(x, mu=256):
"""Mu-Law companding + quantize
Args:
x (array-like): Input signal. Each value of input signal must be in
range of [-1, 1].
mu (number): Compression parameter ``μ``.
Returns:
array-like: Quantized signal (dtype=int)
- y ∈ [0, mu] if x ∈ [-1, 1]
- y ∈ [0, mu) if x ∈ [-1, 1)
.. note::
If you want to get quantized values of range [0, mu) (not [0, mu]),
then you need to provide input signal of range [-1, 1).
Examples:
>>> from scipy.io import wavfile
>>> import pysptk
>>> import numpy as np
>>> from nnmnkwii import preprocessing as P
>>> fs, x = wavfile.read(pysptk.util.example_audio_file())
>>> x = (x / 32768.0).astype(np.float32)
>>> y = P.mulaw_quantize(x)
>>> print(y.min(), y.max(), y.dtype)
15 246 int64
See also:
:func:`nnmnkwii.preprocessing.mulaw`
:func:`nnmnkwii.preprocessing.inv_mulaw`
:func:`nnmnkwii.preprocessing.inv_mulaw_quantize`
"""
mu = 255
y = mulaw(x, mu)
# scale [-1, 1] to [0, mu]
return _asint((y + 1) / 2 * mu)
def inv_mulaw_quantize(y, mu=256):
"""Inverse of mu-law companding + quantize
Args:
y (array-like): Quantized signal (∈ [0, mu]).
mu (number): Compression parameter ``μ``.
Returns:
array-like: Uncompressed signal ([-1, 1])
Examples:
>>> from scipy.io import wavfile
>>> import pysptk
>>> import numpy as np
>>> from nnmnkwii import preprocessing as P
>>> fs, x = wavfile.read(pysptk.util.example_audio_file())
>>> x = (x / 32768.0).astype(np.float32)
>>> x_hat = P.inv_mulaw_quantize(P.mulaw_quantize(x))
>>> x_hat = (x_hat * 32768).astype(np.int16)
See also:
:func:`nnmnkwii.preprocessing.mulaw`
:func:`nnmnkwii.preprocessing.inv_mulaw`
:func:`nnmnkwii.preprocessing.mulaw_quantize`
"""
# scale [0, mu] back to [-1, 1]
mu = 255
y = 2 * _asfloat(y) / mu - 1
return inv_mulaw(y, mu)
def _sign(x):
#wrapper to support tensorflow tensors/numpy arrays
isnumpy = isinstance(x, np.ndarray)
isscalar = np.isscalar(x)
return np.sign(x) if (isnumpy or isscalar) else tf.sign(x)
def _log1p(x):
#wrapper to support tensorflow tensors/numpy arrays
isnumpy = isinstance(x, np.ndarray)
isscalar = np.isscalar(x)
return np.log1p(x) if (isnumpy or isscalar) else tf.log1p(x)
def _abs(x):
#wrapper to support tensorflow tensors/numpy arrays
isnumpy = isinstance(x, np.ndarray)
isscalar = np.isscalar(x)
return np.abs(x) if (isnumpy or isscalar) else tf.abs(x)
def _asint(x):
#wrapper to support tensorflow tensors/numpy arrays
isnumpy = isinstance(x, np.ndarray)
isscalar = | np.isscalar(x) | numpy.isscalar |
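# A quick round-trip check of the helpers above (note mu is fixed at 255
# internally): quantize to integers in [0, 255], expand back, and verify the
# reconstruction error stays small.
import numpy as np

x = np.linspace(-1.0, 1.0, 9, endpoint=False).astype(np.float32)
y = mulaw_quantize(x)
x_hat = inv_mulaw_quantize(y)
print(y, float(np.max(np.abs(x - x_hat))))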
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import os
import theano
import numpy
from wordclasses import TheanoBigramOptimizer, NumpyBigramOptimizer
from theanolm.vocabulary import Vocabulary
from theanolm.vocabulary import compute_word_counts, BigramStatistics
class TestBigramOptimizer(unittest.TestCase):
def setUp(self):
theano.config.compute_test_value = 'warn'
script_path = os.path.dirname(os.path.realpath(__file__))
sentences_path = os.path.join(script_path, 'sentences.txt')
self.sentences_file = open(sentences_path)
self.num_classes = 2
word_counts = compute_word_counts([self.sentences_file])
self.vocabulary = Vocabulary.from_word_counts(word_counts,
self.num_classes)
self.sentences_file.seek(0)
self.statistics = BigramStatistics([self.sentences_file], self.vocabulary)
def tearDown(self):
self.sentences_file.close()
def assert_optimizers_equal(self, numpy_optimizer, theano_optimizer):
self.assertTrue(numpy.array_equal(numpy_optimizer._word_counts, theano_optimizer._word_counts.get_value()))
self.assertEqual((numpy_optimizer._ww_counts - theano_optimizer._ww_counts.get_value()).nnz, 0)
self.assertTrue(numpy.array_equal(numpy_optimizer._class_counts, theano_optimizer._class_counts.get_value()))
self.assertTrue(numpy.array_equal(numpy_optimizer._cc_counts, theano_optimizer._cc_counts.get_value()))
self.assertTrue(numpy.array_equal(numpy_optimizer._cw_counts, theano_optimizer._cw_counts.get_value()))
self.assertTrue(numpy.array_equal(numpy_optimizer._wc_counts, theano_optimizer._wc_counts.get_value()))
def test_statistics(self):
num_words = 8
theano_optimizer = TheanoBigramOptimizer(self.statistics, self.vocabulary)
numpy_optimizer = NumpyBigramOptimizer(self.statistics, self.vocabulary)
self.assertEqual(theano_optimizer.vocabulary_size, num_words)
self.assertEqual(numpy_optimizer.vocabulary_size, num_words)
self.assertEqual(theano_optimizer.num_classes, self.num_classes + 3)
self.assertEqual(numpy_optimizer.num_classes, self.num_classes + 3)
self.assertEqual(len(theano_optimizer._word_to_class.get_value()), num_words)
self.assertEqual(len(numpy_optimizer._word_to_class), num_words)
sos_word_id = self.vocabulary.word_to_id['<s>']
a_word_id = self.vocabulary.word_to_id['a']
b_word_id = self.vocabulary.word_to_id['b']
c_word_id = self.vocabulary.word_to_id['c']
d_word_id = self.vocabulary.word_to_id['d']
e_word_id = self.vocabulary.word_to_id['e']
unk_word_id = self.vocabulary.word_to_id['<unk>']
eos_word_id = self.vocabulary.word_to_id['</s>']
self.assert_optimizers_equal(numpy_optimizer, theano_optimizer)
self.assertEqual(len(numpy_optimizer._word_counts), num_words)
self.assertEqual(numpy_optimizer._word_counts[sos_word_id], 11)
self.assertEqual(numpy_optimizer._word_counts[a_word_id], 13)
self.assertEqual(numpy_optimizer._word_counts[b_word_id], 8)
self.assertEqual(numpy_optimizer._word_counts[c_word_id], 8)
self.assertEqual(numpy_optimizer._word_counts[d_word_id], 11)
self.assertEqual(numpy_optimizer._word_counts[e_word_id], 15)
self.assertEqual(numpy_optimizer._word_counts[unk_word_id], 0)
self.assertEqual(numpy_optimizer._word_counts[eos_word_id], 11)
self.assertEqual(numpy_optimizer._ww_counts.shape[0], num_words)
self.assertEqual(numpy_optimizer._ww_counts.shape[1], num_words)
self.assertEqual(len(numpy_optimizer._class_counts), self.num_classes + 3)
self.assertEqual(numpy_optimizer._cc_counts.shape[0], self.num_classes + 3)
self.assertEqual(numpy_optimizer._cw_counts.shape[0], self.num_classes + 3)
self.assertEqual(numpy_optimizer._cw_counts.shape[1], num_words)
self.assertEqual(numpy_optimizer._wc_counts.shape[0], num_words)
self.assertEqual(numpy_optimizer._wc_counts.shape[1], self.num_classes + 3)
def test_move_and_back(self):
numpy_optimizer = NumpyBigramOptimizer(self.statistics, self.vocabulary)
theano_optimizer = TheanoBigramOptimizer(self.statistics, self.vocabulary)
orig_class_counts = numpy.copy(numpy_optimizer._class_counts)
orig_cc_counts = numpy.copy(numpy_optimizer._cc_counts)
orig_cw_counts = numpy.copy(numpy_optimizer._cw_counts)
orig_wc_counts = numpy.copy(numpy_optimizer._wc_counts)
word_id = self.vocabulary.word_to_id['d']
orig_class_id = numpy_optimizer.get_word_class(word_id)
new_class_id = 3 if orig_class_id != 3 else 4
numpy_optimizer._move(word_id, new_class_id)
theano_optimizer._move(word_id, new_class_id)
self.assert_optimizers_equal(numpy_optimizer, theano_optimizer)
self.assertEqual(numpy.count_nonzero(numpy_optimizer._class_counts != orig_class_counts), 2)
self.assertEqual(numpy.sum(numpy_optimizer._class_counts), numpy.sum(orig_class_counts))
self.assertGreater(numpy.count_nonzero(numpy_optimizer._cc_counts != orig_cc_counts), 0)
self.assertEqual(numpy.sum(numpy_optimizer._cc_counts), numpy.sum(orig_cc_counts))
self.assertGreater(numpy.count_nonzero(numpy_optimizer._cw_counts != orig_cw_counts), 0)
self.assertEqual(numpy.sum(numpy_optimizer._cw_counts), numpy.sum(orig_cw_counts))
self.assertGreater(numpy.count_nonzero(numpy_optimizer._wc_counts != orig_wc_counts), 0)
self.assertEqual(numpy.sum(numpy_optimizer._wc_counts), numpy.sum(orig_wc_counts))
numpy_optimizer._move(word_id, orig_class_id)
theano_optimizer._move(word_id, orig_class_id)
self.assert_optimizers_equal(numpy_optimizer, theano_optimizer)
self.assertTrue(numpy.array_equal(numpy_optimizer._class_counts, orig_class_counts))
self.assertTrue(numpy.array_equal(numpy_optimizer._cc_counts, orig_cc_counts))
self.assertTrue(numpy.array_equal(numpy_optimizer._cw_counts, orig_cw_counts))
self.assertTrue(numpy.array_equal(numpy_optimizer._wc_counts, orig_wc_counts))
def test_move_and_recompute(self):
optimizer1 = NumpyBigramOptimizer(self.statistics, self.vocabulary)
word_id = self.vocabulary.word_to_id['d']
orig_class_id = optimizer1.get_word_class(word_id)
new_class_id = 3 if orig_class_id != 3 else 4
optimizer1._word_to_class[word_id] = new_class_id
counts = optimizer1._compute_class_statistics(optimizer1._word_counts,
optimizer1._ww_counts,
optimizer1._word_to_class)
class_counts = numpy.zeros(optimizer1.num_classes, 'int32')
cc_counts = numpy.zeros((optimizer1.num_classes, optimizer1.num_classes), dtype='int32')
cw_counts = numpy.zeros((optimizer1.num_classes, optimizer1.vocabulary_size), dtype='int32')
wc_counts = numpy.zeros((optimizer1.vocabulary_size, optimizer1.num_classes), dtype='int32')
for wid, cid in enumerate(optimizer1._word_to_class):
class_counts[cid] += optimizer1._word_counts[wid]
for left_wid, right_wid in zip(*optimizer1._ww_counts.nonzero()):
count = optimizer1._ww_counts[left_wid, right_wid]
left_cid = optimizer1._word_to_class[left_wid]
right_cid = optimizer1._word_to_class[right_wid]
cc_counts[left_cid,right_cid] += count
cw_counts[left_cid,right_wid] += count
wc_counts[left_wid,right_cid] += count
self.assertTrue(numpy.array_equal(class_counts, counts[0]))
# -*- coding: utf-8 -*-
## @package inversetoon.core.normal_cone
#
# Normal cone class.
# @author tody
# @date 2015/08/11
import numpy as np
from inversetoon.np.norm import normalizeVectors, normalizeVector
from inversetoon.core.transform import coordinateFrame
from inversetoon.util.logger import getLogger
logger = getLogger(__name__)
## Provide normal interpolation based on normal cone.
class NormalConeInterpolation:
## Constructor
# @param N1 normal vector: from.
# @param N2 normal vector: to.
# @param L light vector.
def __init__(self, N1, N2, L=[0.3, 0.5, 0.7], I=None):
self._L = L
self._I = I
self._Lxyz = coordinateFrame(self._L)
self._N1 = N1
self._N2 = N2
self.computeCenter()
self.computeConeAngles()
def computeCenter(self):
if self._I is None:
self._I = 0.5 * np.dot(self._L, self._N1 + self._N2)
self._center = self._I * self._L
def computeConeCoordinate(self, N):
dN = N - self._center
dN_x = np.dot(dN, self._Lxyz[0])
dN_y = np.dot(dN, self._Lxyz[1])
return dN_x, dN_y
def computeConeAngle(self, N):
dN_x, dN_y = self.computeConeCoordinate(N)
return np.arctan2(dN_x, dN_y)
def computeConeAngles(self):
self._theta1 = self.computeConeAngle(self._N1)
self._theta2 = self.computeConeAngle(self._N2)
def interpolate(self, parameters):
return self.interpolate_simple(parameters)
def interpolate_simple(self, parameters):
dN1 = self._N1 - self._center
dN2 = self._N2 - self._center
dNs = np.array([(1.0 - t) * dN1 + t * dN2 for t in parameters])
normals = self._center + dNs
normals = normalizeVectors(normals)
return normals
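# Usage sketch (illustrative, not part of the original module): interpolate
# between two normals under a light direction. The vectors below are
# arbitrary example values.
def _example_normal_cone_interpolation():
    N1 = normalizeVector(np.array([0.2, 0.1, 0.9]))
    N2 = normalizeVector(np.array([-0.3, 0.2, 0.8]))
    interpolator = NormalConeInterpolation(N1, N2, L=[0.3, 0.5, 0.7])
    # Sample five normals along the cone between N1 and N2.
    return interpolator.interpolate(np.linspace(0.0, 1.0, 5))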
class NormalCone:
## Constructor
def __init__(self, L=[0.3, 0.5, 0.7], I=0.7, Ns=[]):
self._L = normalizeVector(np.array(L))
self._I = I
self._Ns = Ns
self._Lxyz = coordinateFrame(self._L)
self.computeCone()
self.computeAxisCenter()
self.computeConeAngles()
self.computeConeAngleChanges()
def setNormals(self, Ns):
self._Ns = Ns
def normals(self):
return self._Ns
def setConeAngles(self, thetas):
self._thetas = thetas
def coneAngles(self):
return self._thetas
def coneCoordinates(self):
return self._N_ts
def coneAngleChanges(self):
return self._dthetas
def computeCone(self):
h_cone = self._I
r_cone = np.sqrt(1.0 - h_cone ** 2)
self._h_cone = h_cone
self._r_cone = r_cone
def computeAxisCenter(self):
self._center = self._I * self._L
def computeConeAngles(self):
thetas = []
N_ts = []
for N in self._Ns:
N_t = N - self._center
N_tx = np.dot(N_t, self._Lxyz[0])
N_ty = np.dot(N_t, self._Lxyz[1])
thetas.append(np.arctan2(N_tx, N_ty))
N_ts.append((N_tx, N_ty))
self._thetas = thetas
theta_min = np.min(thetas)
theta_max = np.max(thetas)
self._theta_range = [theta_min, theta_max]
self._N_ts = np.array(N_ts)
def computeConeAngleChanges(self):
dthetas = np.zeros(self._N_ts.shape[0])
dthetas[:-1] = np.cross(self._N_ts[1:, :], self._N_ts[:-1, :])
dthetas[-1] = dthetas[-2]
self._dthetas = np.array(dthetas)
from matplotlib import pyplot as plt
import numpy as np
# numpy's fft implementation is slow, so use FFTW as a drop-in replacement
import pyfftw.interfaces.numpy_fft as fft
from math import floor
# Load an image
image = plt.imread("einstein1_7.jpg")
# Make image greyscale
image = np.average(image, axis=2)
# Set up geometry
width, height = image.shape
centre_x = floor(width / 2)
centre_y = floor(height / 2)
x_axis = np.arange(width)
y_axis = np.arange(height)
# Create circular filter
filter_radius = 100
filter = np.zeros_like(image)
for i in range(width):
for j in range(height):
if (i - centre_x) ** 2 + (j - centre_y) ** 2 > filter_radius ** 2:
filter[i, j] = 1
# Invert: lowpass filter
filter = 1 - filter
# Move to Fourier plane
FT = fft.fftshift(fft.fft2(image))
# Apply Fourier filter
filtered_FT = FT * filter
# Inverse Fourier transform to image plane
filtered_image = fft.ifft2(fft.ifftshift(filtered_FT))
# Plot results
plt.figure(figsize=[20, 20])
# Original image
plt.subplot(2, 2, 1)
plt.title("Original image")
plt.imshow(image)
# Fourier transform; log scale to bring out detail outside centre
plt.subplot(2, 2, 2)
plt.title("Fourier transform of original image")
plt.imshow(np.log(np.abs(FT) ** 2))
# Filtered fourier transform
plt.subplot(2, 2, 3)
plt.title("Fourier transform restricted to leading modes")
plt.imshow(np.log(np.abs(filtered_FT) ** 2))
# Observed image
plt.subplot(2, 2, 4)
plt.title("Observed image with filter")
plt.imshow(np.abs(filtered_image) ** 2)
plt.savefig("fourier_restricted.pdf")
def salt_pepper(image, r):
'''Add random light and dark pixels with frequency `r` to `image`,
returning the result.'''
r = min(1, r)
uniform_random = np.random.random(image.shape)
# Pepper
noisy_image = np.where(uniform_random < r / 2,
np.zeros_like(image),
image)
# Salt
noisy_image = np.where(uniform_random > 1 - r / 2,
np.ones_like(image) * 255,
noisy_image)
return noisy_image
# Add noise to image
noisy_image = salt_pepper(image, 0.1)
# Reuse the lowpass filter from the previous task (`filter` was already
# inverted to a lowpass mask above)
lowpass_filter = filter
# Move to Fourier plane
FT = fft.fftshift(fft.fft2(noisy_image))
# Apply Fourier filter
filtered_FT = FT * lowpass_filter
# Inverse Fourier transform to image plane
filtered_image = fft.ifft2(fft.ifftshift(filtered_FT))
# Plot results
plt.figure(figsize=[15, 15])
# Original image
plt.subplot(2, 2, 1)
plt.title("Image with noise added")
# White noise doesn't show up very well with the default colour map
plt.imshow(noisy_image, cmap='gray')
# Fourier transform; log scale to bring out detail outside centre
plt.subplot(2, 2, 2)
plt.title("Fourier transform of noisy image")
plt.imshow(np.log(np.abs(FT) ** 2))
import codecs
import csv
import math
import multiprocessing
import os
import xml.etree.cElementTree as et
from functools import partial
import multiprocessing
from pathlib import Path
import sys
from PyQt5.QtCore import pyqtSlot
from tqdm import tqdm
import time as clock
import matplotlib.pyplot as plt
import napari
import numpy as np
import pandas as pd
from matplotlib.backends.backend_qt5agg import \
FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QComboBox, QPushButton, QSlider
from scipy import spatial
from scipy.fftpack import fft, fftfreq, fftshift, ifft
from skimage import measure
from skimage.filters import sobel
from skimage.measure import label, regionprops
from skimage.segmentation import find_boundaries
from tifffile import imread, imwrite
from .napari_animation import AnimationWidget
import dask as da
from dask.array.image import imread as daskread
from skimage.util import map_array
import seaborn as sns
from scipy.stats import norm
from scipy.optimize import curve_fit
from lmfit import Model
from numpy import exp, loadtxt, pi, sqrt
from matplotlib import cm
'''Define function to run multiple processors and pool the results together'''
Boxname = 'TrackBox'
AttributeBoxname = 'AttributeIDBox'
TrackAttributeBoxname = 'TrackAttributeIDBox'
pd.options.display.float_format = '${:,.2f}'.format
savedir = None
ParentDistances = {}
ChildrenDistances = {}
timed_mask = {}
AllStartParent = {}
AllEndParent = {}
AllID = []
AllStartChildren = {}
AllEndChildren = {}
DividingTrackIds = []
NonDividingTrackIds = []
AllTrackIds = []
SaveIds = []
globalcount = "0"
parentstartid = []
parentstartdist = []
parentendid = []
parentenddist = []
childrenstartid = []
childrenstartdist = []
childrenendid = []
childrenenddist = []
def prob_sigmoid(x):
return 1 - math.exp(-x)
def CreateTrackCheckpoint(ImageName, LabelName, MaskName, Name, savedir):
Mask = None
Label = imread(LabelName)
Image = imread(ImageName)
if MaskName is not None:
Mask = imread(MaskName)
assert Image.shape == Label.shape
TimeList = []
XList = []
YList = []
ZList = []
LabelList = []
PerimeterList = []
VolumeList = []
IntensityList = []
ExtentXList = []
ExtentYList = []
ExtentZList = []
print('Image has shape:', Image.shape)
print('Image Dimensions:', len(Image.shape))
if Mask is not None:
if len(Mask.shape) < len(Image.shape):
# T Z Y X
UpdateMask = np.zeros(
[Label.shape[0], Label.shape[1], Label.shape[2], Label.shape[3]]
)
for i in range(0, UpdateMask.shape[0]):
for j in range(0, UpdateMask.shape[1]):
UpdateMask[i, j, :, :] = Mask[i, :, :]
Mask = UpdateMask
for i in tqdm(range(0, Image.shape[0])):
CurrentSegimage = Label[i, :].astype('uint16')
Currentimage = Image[i, :]
if Mask is not None:
CurrentSegimage[Mask[i, :] == 0] = 0
properties = measure.regionprops(CurrentSegimage, Currentimage)
for prop in properties:
Z = prop.centroid[0]
Y = prop.centroid[1]
X = prop.centroid[2]
regionlabel = prop.label
intensity = np.sum(prop.intensity_image)  # prop.image is only the boolean mask; sum the actual intensities
sizeZ = abs(prop.bbox[0] - prop.bbox[3])
sizeY = abs(prop.bbox[1] - prop.bbox[4])
sizeX = abs(prop.bbox[2] - prop.bbox[5])
volume = sizeZ * sizeX * sizeY
radius = math.pow(3 * volume / (4 * math.pi), 1.0 / 3.0)
perimeter = 2 * math.pi * radius
TimeList.append(i)
XList.append(int(X))
YList.append(int(Y))
ZList.append(int(Z))
LabelList.append(regionlabel)
VolumeList.append(volume)
PerimeterList.append(perimeter)
IntensityList.append(intensity)
ExtentZList.append(sizeZ)
ExtentXList.append(sizeX)
ExtentYList.append(sizeY)
df = pd.DataFrame(
list(
zip(
TimeList,
XList,
YList,
ZList,
LabelList,
PerimeterList,
VolumeList,
IntensityList,
ExtentXList,
ExtentYList,
ExtentZList,
)
),
index=None,
columns=[
'T',
'X',
'Y',
'Z',
'Label',
'Perimeter',
'Volume',
'Intensity',
'ExtentX',
'ExtentY',
'ExtentZ',
],
)
df.to_csv(savedir + '/' + 'FijibTMcheckpoint' + Name + '.csv', index=False)
def GetBorderMask(Mask):
ndim = len(Mask.shape)
# YX shaped object
if ndim == 2:
Mask = label(Mask)
Boundary = find_boundaries(Mask)
# TYX shaped object
if ndim == 3:
Boundary = np.zeros([Mask.shape[0], Mask.shape[1], Mask.shape[2]])
for i in range(0, Mask.shape[0]):
Mask[i, :] = label(Mask[i, :])
Boundary[i, :] = find_boundaries(Mask[i, :])
# TZYX shaped object
if ndim == 4:
Boundary = np.zeros(
[Mask.shape[0], Mask.shape[1], Mask.shape[2], Mask.shape[3]]
)
# Loop over time
for i in range(0, Mask.shape[0]):
Mask[i, :] = label(Mask[i, :])
for j in range(0, Mask.shape[1]):
Boundary[i, j, :, :] = find_boundaries(Mask[i, j, :, :])
return Boundary
"""
Convert an integer image into boundary points for 2,3 and 4D data
"""
def boundary_points(mask, xcalibration, ycalibration, zcalibration):
ndim = len(mask.shape)
# YX shaped object
if ndim == 2:
mask = label(mask)
labels = []
size = []
tree = []
properties = measure.regionprops(mask, mask)
for prop in properties:
labelimage = prop.image
regionlabel = prop.label
sizey = abs(prop.bbox[0] - prop.bbox[2]) * ycalibration
sizex = abs(prop.bbox[1] - prop.bbox[3]) * xcalibration
volume = sizey * sizex
radius = math.sqrt(volume / math.pi)
boundary = find_boundaries(labelimage)
indices = np.where(boundary > 0)
indices = np.transpose(np.asarray(indices))
real_indices = indices.copy()
for j in range(0, len(real_indices)):
real_indices[j][0] = real_indices[j][0] * xcalibration
real_indices[j][1] = real_indices[j][1] * ycalibration
tree.append(spatial.cKDTree(real_indices))
if regionlabel not in labels:
labels.append(regionlabel)
size.append(radius)
# This object contains list of all the points for all the labels in the Mask image with the label id and volume of each label
timed_mask[str(0)] = [tree, indices, labels, size]
# TYX shaped object
if ndim == 3:
Boundary = np.zeros([mask.shape[0], mask.shape[1], mask.shape[2]])
for i in tqdm(range(0, mask.shape[0])):
mask[i, :] = label(mask[i, :])
properties = measure.regionprops(mask[i, :], mask[i, :])
labels = []
size = []
tree = []
for prop in properties:
labelimage = prop.image
regionlabel = prop.label
sizey = abs(prop.bbox[0] - prop.bbox[2]) * ycalibration
sizex = abs(prop.bbox[1] - prop.bbox[3]) * xcalibration
volume = sizey * sizex
radius = math.sqrt(volume / math.pi)
boundary = find_boundaries(labelimage)
indices = np.where(boundary > 0)
from __future__ import division
import numpy as np
from scipy.sparse import issparse
from .linalg import dot_inplace_right
def eigenvalue_decomposition(C, is_inverse=False, eps=1e-10):
r"""
Eigenvalue decomposition of a given covariance (or scatter) matrix.
Parameters
----------
C : ``(N, N)`` `ndarray` or `scipy.sparse`
The Covariance/Scatter matrix. If it is a `numpy.array`, then
`numpy.linalg.eigh` is used. If it is an instance of `scipy.sparse`,
then `scipy.sparse.linalg.eigsh` is used. If it is a precision matrix
(inverse covariance), then set `is_inverse=True`.
is_inverse : `bool`, optional
It ``True``, then it is assumed that `C` is a precision matrix (
inverse covariance). Thus, the eigenvalues will be inverted. If
``False``, then it is assumed that `C` is a covariance matrix.
eps : `float`, optional
Tolerance value for positive eigenvalue. Those eigenvalues smaller
than the specified eps value, together with their corresponding
eigenvectors, will be automatically discarded. The final
limit is computed as ::
limit = np.max(np.abs(eigenvalues)) * eps
Returns
-------
pos_eigenvectors : ``(N, p)`` `ndarray`
The matrix with the eigenvectors corresponding to positive eigenvalues.
pos_eigenvalues : ``(p,)`` `ndarray`
The array of positive eigenvalues.
"""
# compute eigenvalue decomposition
if issparse(C):
from scipy.sparse.linalg import eigsh
eigenvalues, eigenvectors = eigsh(C, k=C.shape[0] - 1)
else:
eigenvalues, eigenvectors = np.linalg.eigh(C)
# sort eigenvalues from largest to smallest
index = np.argsort(eigenvalues)[::-1]
eigenvalues = eigenvalues[index]
eigenvectors = eigenvectors[:, index]
# set tolerance limit
limit = np.max(np.abs(eigenvalues)) * eps
# select positive eigenvalues
pos_index = eigenvalues > 0.0
pos_eigenvalues = eigenvalues[pos_index]
pos_eigenvectors = eigenvectors[:, pos_index]
# check they are within the expected tolerance
index = pos_eigenvalues > limit
pos_eigenvalues = pos_eigenvalues[index]
pos_eigenvectors = pos_eigenvectors[:, index]
# if C was a precision matrix (inverse covariance), then invert and re-sort
# the eigenvalues
if is_inverse:
pos_eigenvalues = pos_eigenvalues[::-1] ** -1
pos_eigenvectors = pos_eigenvectors[:, ::-1]
return pos_eigenvectors, pos_eigenvalues
def pca(X, centre=True, inplace=False, eps=1e-10):
r"""
Apply Principal Component Analysis (PCA) on the data matrix `X`. In the case
where the data matrix is very large, it is advisable to set
``inplace = True``. However, note this destructively edits the data matrix
by subtracting the mean inplace.
Parameters
----------
X : ``(n_samples, n_dims)`` `ndarray`
Data matrix.
centre : `bool`, optional
Whether to centre the data matrix. If `False`, zero will be subtracted.
inplace : `bool`, optional
Whether to do the mean subtracting inplace or not. This is crucial if
the data matrix is greater than half the available memory size.
eps : `float`, optional
Tolerance value for positive eigenvalue. Those eigenvalues smaller
than the specified eps value, together with their corresponding
eigenvectors, will be automatically discarded.
Returns
-------
U (eigenvectors) : ``(n_components, n_dims)`` `ndarray`
Eigenvectors of the data matrix.
l (eigenvalues) : ``(n_components,)`` `ndarray`
Positive eigenvalues of the data matrix.
m (mean vector) : ``(n_dimensions,)`` `ndarray`
Mean that was subtracted from the data matrix.
"""
n, d = X.shape
if centre:
# centre data
# m (mean vector): d
m = np.mean(X, axis=0)
else:
m = np.zeros(d, dtype=X.dtype)
# This is required if the data matrix is very large!
if inplace:
X -= m
else:
X = X - m
if d < n:
# compute covariance matrix
# C (covariance): d x d
C = np.dot(X.conj().T, X) / (n - 1)
# C should be perfectly symmetrical, but numerical error can creep
# in. Enforce symmetry here to avoid creating complex eigenvectors
C = (C + C.conj().T) / 2.0
# perform eigenvalue decomposition
# U (eigenvectors): d x n
# s (eigenvalues): n
U, l = eigenvalue_decomposition(C, is_inverse=False, eps=eps)
# transpose U
# U: n x d
U = U.T
else:
# d > n
# compute small covariance matrix
# C (covariance): n x n
C = np.dot(X, X.conj().T) / (n - 1)
# C should be perfectly symmetrical, but numerical error can creep
# in. Enforce symmetry here to avoid creating complex eigenvectors
C = (C + C.conj().T) / 2.0
# perform eigenvalue decomposition
# V (eigenvectors): n x n
# s (eigenvalues): n
V, l = eigenvalue_decomposition(C, is_inverse=False, eps=eps)
# compute final eigenvectors
# U: n x d
w = np.sqrt(1.0 / ((n - 1) * l))
dot = dot_inplace_right if inplace else np.dot
U = dot(V.conj().T, X)
U *= w[:, None]
return U, l, m
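# Usage sketch (illustrative): run PCA on random data, then project a sample
# onto the principal subspace and reconstruct it. Shapes follow the docstring.
def _example_pca():
    X = np.random.randn(50, 10)
    U, l, m = pca(X, centre=True)
    # U: (n_components, n_dims); project the first centred sample and back.
    w = np.dot(X[0] - m, U.conj().T)
    x_rec = m + np.dot(w, U)
    return x_rec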
# The default value of eps tolerance is set to 1e-5 (instead of 1e-10 that used
# to be). This is done in order for pcacov to work for inverse single precision C
# i.e. is_inverse=True and dtype=np.float32. 1e-10 works perfectly when the
# covariance matrix has double precision (np.float64). However, if C has single
# precision (np.float32) and is inverse, then the first two eigenvectors end up
# having noise.
def pcacov(C, is_inverse=False, eps=1e-5):
r"""
Apply Principal Component Analysis (PCA) given a covariance/scatter matrix
`C`. In the case where the data matrix is very large, it is advisable to set
``inplace = True``. However, note this destructively edits the data matrix
by subtracting the mean inplace.
Parameters
----------
C : ``(N, N)`` `ndarray` or `scipy.sparse`
The Covariance/Scatter matrix. If it is a precision matrix (inverse
covariance), then set `is_inverse=True`.
is_inverse : `bool`, optional
It ``True``, then it is assumed that `C` is a precision matrix (
inverse covariance). Thus, the eigenvalues will be inverted. If
``False``, then it is assumed that `C` is a covariance matrix.
eps : `float`, optional
Tolerance value for positive eigenvalue. Those eigenvalues smaller
than the specified eps value, together with their corresponding
eigenvectors, will be automatically discarded.
Returns
-------
U (eigenvectors) : ``(n_components, n_dims)`` `ndarray`
Eigenvectors of the data matrix.
l (eigenvalues) : ``(n_components,)`` `ndarray`
Positive eigenvalues of the data matrix.
"""
if C.shape[0] != C.shape[1]:
raise ValueError("C must be square.")
# C should be perfectly symmetrical, but numerical error can creep in.
# Enforce symmetry here to avoid creating complex eigenvectors
C = (C + C.conj().T) / 2.0
# C (covariance): d x d
# perform eigenvalue decomposition
# U (eigenvectors): d x n
# s (eigenvalues): n
U, l = eigenvalue_decomposition(C, is_inverse=is_inverse, eps=eps)
# transpose U
# U: n x d
U = U.conj().T
return U, l
def ipca(B, U_a, l_a, n_a, m_a=None, f=1.0, eps=1e-10):
r"""
Perform Incremental PCA on the eigenvectors ``U_a``, eigenvalues ``l_a`` and
mean vector ``m_a`` (if present) given a new data matrix ``B``.
Parameters
----------
B : ``(n_samples, n_dims)`` `ndarray`
New data matrix.
U_a : ``(n_components, n_dims)`` `ndarray`
Eigenvectors to be updated.
l_a : (n_components) `ndarray`
Eigenvalues to be updated.
n_a : `int`
Total number of samples used to produce U_a, s_a and m_a.
m_a : ``(n_dims,)`` `ndarray`, optional
Mean to be updated. If ``None`` or ``(n_dims,)`` `ndarray` filled
with 0s the data matrix will not be centred.
f : ``[0, 1]`` `float`, optional
Forgetting factor that weights the relative contribution of new
samples vs old samples. If 1.0, all samples are weighted equally
and, hence, the results is the exact same as performing batch
PCA on the concatenated list of old and new simples. If <1.0,
more emphasis is put on the new samples. See [1] for details.
eps : `float`, optional
Tolerance value for positive eigenvalue. Those eigenvalues smaller
than the specified eps value, together with their corresponding
eigenvectors, will be automatically discarded.
Returns
-------
U (eigenvectors) : ``(n_components, n_dims)`` `ndarray`
Updated eigenvectors.
s (eigenvalues) : ``(n_components,)`` `ndarray`
Updated positive eigenvalues.
m (mean vector) : ``(n_dims,)`` `ndarray`
Updated mean.
References
----------
.. [1] <NAME>, <NAME>, <NAME>, <NAME>.
"Incremental Learning for Robust Visual Tracking". IJCV, 2007.
"""
# multiply current eigenvalues by total number of samples and square
# root them to obtain singular values of the original data.
s_a = np.sqrt((n_a - 1) * l_a)
# obtain number of dimensions and number of samples of new data.
n_b, d = B.shape
# multiply the number of samples of the original data by the forgetting
# factor
n_a *= f
# total number of samples
n = n_a + n_b
if m_a is not None and not np.all(m_a == 0):
# -*- coding: utf-8 -*-
"""
Created on Jul 21 2017, Modified Nov 15 2019.
@authors: <NAME>
Compute prosody features based on pitch, loudness, duration, ratios, rhythm, and perturbations (apq/ppq)
OUTPUT OF THE FUNCTION "prosody_features":
"""
import os
path_base = os.path.dirname(os.path.abspath(__file__))
import numpy as np
import warnings
import sigproc as sg
import scipy as sp
#from scipy.stats import kurtosis, skew
from scipy.signal import gaussian
from scipy.io.wavfile import write
import praat.praat_functions as praatF
#import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error as mse
def prosody_features(sig,fs,f0=np.asarray([0]),winTime=0.04,stepTime=0.01):
if (np.sum(f0) == 0) and (len(f0) == 1):
f0 = f0_contour_pr(sig,fs,winTime,stepTime)#F0
#VAD
out_VAD = eVAD(sig,fs)
#Compute f0 features
feats_f0 = f0_features(sig,fs,f0,winTime,stepTime)
#Compute voiced features
feats_voiced,vcont = voiced_features(sig,fs,f0,stepTime)
#Compute VAD features (duration+energy content)
feats_VAD = VAD_features(sig,fs,out_VAD,winTime,stepTime)
#Compute unvoiced features
feats_unvoiced = unvoiced_features(sig,fs,vcont,out_VAD['Pause_labels'])
X = [feats_f0,feats_voiced,feats_unvoiced,feats_VAD]
#Create new dictionary with all features
X_pr = {}
for k in X:
for f in list(k.keys()):
X_pr[f] = k[f]
return X_pr
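# Usage sketch (illustrative): 'speech.wav' is a placeholder path and the
# signal is assumed to be mono. Returns the dictionary of prosody features.
def _example_prosody_features():
    from scipy.io.wavfile import read
    fs, sig = read('speech.wav')
    sig = sig.astype(float)
    return prosody_features(sig, fs)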
def prosody_features_dynamic(sig,fs,f0=np.asarray([0]),winTime=0.04,stepTime=0.01):
if len(f0)==0:
f0 = f0_contour_pr(sig,fs,winTime,stepTime)#F0
#---------------------------------------
f0coef,voiced,_ = voiced_unvoiced(sig,fs,f0,stepTime)
# f0coef = np.vstack(f0coef)
#Voiced features
lvoiced = []
for v in voiced:
lvoiced.append(len(v)/fs)#Length of voiced segment
lvoiced = np.vstack(lvoiced)
#.........................................................
X = np.hstack([lvoiced,f0coef])
return X
#==========================================================================
def Hz2Semitone(F):
ST=39.87*np.log10(F/50)  # 39.87 ~= 12/log10(2): semitones relative to 50 Hz (base-10 log, not natural log)
return ST
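# Sanity-check sketch: with the base-10 log above, one octave (100 Hz vs the
# 50 Hz reference) should measure ~12 semitones.
def _check_hz2semitone():
    assert abs(Hz2Semitone(100.0) - 12.0) < 0.05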
#==========================================================================
def f0_contour_pr(sig,fs,sizeframe=0.04,step=0.01,maxf0=500, post=False):
"""
This function is used to extract the F0 contour using praat
"""
sig = sig-np.mean(sig)
sig = sig/np.max(np.abs(sig))
temp_aud = (sig*2**15).astype(np.int16)
temp_path = os.path.join(path_base, 'temp_sig.wav')#Creates temporary wav file
write(temp_path,int(fs),temp_aud)
temp_filename_f0=path_base+'/praat/tempF0.txt'
np.savetxt(temp_filename_f0,np.zeros((3,3)))
temp_filename_vuv=path_base+'/praat/tempvuv.txt'
np.savetxt(temp_filename_vuv,np.zeros((3,3)))
minf0 = int(3/sizeframe)
praatF.praat_vuv(temp_path, temp_filename_f0, temp_filename_vuv,
time_stepF0=step, minf0=minf0, maxf0=maxf0)
#Tomas: I modified this function. The size of the frame (in seconds) and sampling frequency are
#now input arguments. This was necessary to compute the number of frames correctly.
f0,_ = praatF.decodeF0(temp_filename_f0,len(sig),float(fs),sizeframe,step)
if np.sum(f0)==0:
print('PITCH WAS NOT DETECTED')
os.remove(temp_filename_f0)
os.remove(temp_filename_vuv)
os.remove(temp_path)
#Simple post-processing of F0 to remove outliers (2.5-sigma clipping)
if post:
print('F0 post-processing Activated')
uf0 = np.mean(f0[f0>0])
sf0 = np.std(f0[f0>0])
f0[f0>(uf0+(2.5*sf0))] = 0
f0[f0<(uf0-(2.5*sf0))] = 0
return f0
#==========================================================================
def voiced_unvoiced(sig,fs,f0,stepTime):
"""
Voiced unvoiced segmentation
sig: Speech signal
fs: Sampling frequency
f0: Pitch contour
stepTime: Step size (in seconds) used to computed the f0 contour.
"""
yp = f0.copy()
yp[yp!=0] = 1
ydf = np.diff(yp)
lim_end = np.where(ydf==-1)[0]+1
lim_ini = np.where(ydf==1)[0]+1
#Voiced segments
v_segm = []
f0_feats = []#Dynamic f0-based features
#Unvoiced
uv_segm = []
for idx in range(len(lim_ini)):
#------------------------------------
#Voiced segments
tini = int(lim_ini[idx]*stepTime*fs)
tend = int(lim_end[idx]*stepTime*fs)
if int(tend-tini)>int(0.04*fs):
# print(tini,tend)
v_segm.append(sig[tini:tend])
x = np.arange(0,len(f0[lim_ini[idx]:lim_end[idx]]))
#F0 based features
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.RankWarning)
f0c = np.polyfit(x,f0[lim_ini[idx]:lim_end[idx]],5)
# f0c = f0c.reshape(1,-1)#Dynamic reprsentation of f0.
p = np.poly1d(f0c)
f0_mse = mse(f0[lim_ini[idx]:lim_end[idx]],p(x))
# plt.plot(p(x),'k',label='Fitted')
# plt.plot(f0[lim_ini[idx]:lim_end[idx]],'r',label='Real')
# plt.legend()
if len(sig[tini:tend])>int(3*0.04*fs):
frames = sg.extract_windows(sig[tini:tend],int(0.04*fs),int(0.01*fs))
jitter = ppq(f0[lim_ini[idx]:lim_end[idx]],3)
ak = np.max(frames,axis=1)
shimmer = apq(ak,3)
else:
jitter = 0
shimmer = 0
f0temp = np.hstack([jitter,shimmer,len(sig[tini:tend])/fs,f0_mse,f0c])
f0_feats.append(f0temp)
#--------------------------------
#------------------------------------
#Unvoiced segments
tini = int(lim_end[idx]*stepTime*fs)
if (idx+1)<(len(lim_ini)-1):
tend = int(lim_ini[idx+1]*stepTime*fs)
if int(tend-tini)<int(0.27*fs):
uv_segm.append(sig[tini:tend])
#--------------------------------------------------------------------
f0_feats = np.vstack(f0_feats)
return f0_feats,v_segm,uv_segm
#==========================================================================
def voiced_seg(sig,fs,f0,stepTime):
"""
Voiced segments
sig: Speech signal
fs: Sampling frequency
f0: Pitch contour
stepTime: Step size (in seconds) used to computed the f0 contour.
"""
yp = f0.copy()
yp[yp!=0] = 1
#In case the contour starts or ends voiced, pad with zeros so np.diff below
#still produces the corresponding rising/falling edge (np.insert/np.append
#return new arrays, so the result must be assigned)
if yp[0] == 1:
    yp = np.insert(yp, 0, 0)
if yp[-1] == 1:
    yp = np.append(yp, 0)
#---------------------
ydf = np.diff(yp)
lim_end = np.where(ydf==-1)[0]+1
lim_ini = np.where(ydf==1)[0]+1
#Voiced segments
v_segm = []
tm = []
vcont = np.zeros(len(sig))
for idx in range(len(lim_ini)):
#------------------------------------
#Voiced segments
tini = int(lim_ini[idx]*stepTime*fs)
tend = int(lim_end[idx]*stepTime*fs)
if int(tend-tini)>int(0.04*fs):
# print(tini,tend)
vcont[tini:tend] = 1
v_segm.append(sig[tini:tend])
tm.append(np.hstack([lim_ini[idx]*stepTime,lim_end[idx]*stepTime]))
vseg = {'Voiced_segments':v_segm,
'Voiced_times':tm,
'Voiced_labels':vcont}
return vseg
#----------------------------------------------------------------------------
def unvoiced_seg(sig,fs,vseg,sil):
uvcont = sil+vseg+1
uvcont[uvcont>1] = 0
uvcont[0] = 0
uvcont[-1:] = 0
yp = uvcont.copy()
ydf = np.diff(yp)
lim_end = np.where(ydf==-1)[0]+1
lim_ini = np.where(ydf==1)[0]+1
#Voiced segments
uv_seg = []
uv_dur = []
uv_tm = []
for idx in range(len(lim_ini)):
#------------------------------------
try:
tini = lim_ini[idx]/fs
tend = lim_end[idx]/fs
# uv_dur.append(tend-tini)
uv_seg.append(sig[lim_ini[idx]:lim_end[idx]])
uv_tm.append([tini,tend])
except:
print('Unvoiced segment not included')
uv_dur = np.asarray(uv_dur)
return uv_seg,uv_tm,uvcont
#----------------------------------------------------------------------------
def eVAD(sig,fs,win=0.015,step=0.01):
"""
Energy-based Voice Activity Detection
"""
#Normalize signal
sig = sig-np.mean(sig)
sig /=np.max(np.abs(sig))
lsig = len(sig)
#Pad the beginning and end with low-level noise so boundary frames are handled consistently
#Set min threshold based on the energy of the signal
e = []
frames = sg.extract_windows(sig,int(win*fs),int(step*fs))
for seg in frames:
e.append(10*np.log10(np.sum(np.absolute(seg)**2)/len(seg)))
e = np.asarray(e)
idx_min = np.where(e==np.min(e))
thr = np.min(frames[idx_min])
ext_sil = int(fs)
esil = int((ext_sil/2)/fs/step)
new_sig = np.random.randn(lsig+ext_sil)*thr
new_sig[int(ext_sil/2):lsig+int(ext_sil/2)] = sig
sig = new_sig
e = []#energy in dB
frames = sg.extract_windows(sig,int(win*fs),int(step*fs))
frames*=np.hanning(int(win*fs))
for seg in frames:
e.append(10*np.log10(np.sum(np.absolute(seg)**2)/len(seg)))
e = np.asarray(e)
e = e-np.mean(e)
#Smooth energy contour to remove small energy variations
gauslen = int(fs*0.01)
window = gaussian(gauslen, std=int(gauslen*0.05))
#Convolve signal with Gaussian window for smoothing
smooth_env = e.copy()
smooth_env = sp.convolve(e,window)
smooth_env = smooth_env/np.max(smooth_env)
ini = int(gauslen/2)
fin = len(smooth_env)-ini
e = smooth_env[ini:fin]
e = e/np.max(np.abs(e))
e = e[esil:int(lsig/fs/step)+esil]
thr = np.median(e[e<0])
cont_sil = np.zeros(lsig)
cont_vad = np.zeros(lsig)
itime = 0
etime = int(win*fs)
for i in range(len(e)):
if e[i]<=thr:
cont_sil[itime:etime] = 1
else:
cont_vad[itime:etime] = 1
itime = i*int(step*fs)
etime = itime+int(win*fs)
sig = sig[int(ext_sil/2):lsig+int(ext_sil/2)]#Remove the padding added at the beginning
if np.sum(cont_sil)!=0:
#Pauses
dur_sil,seg_sil,time_sil = get_segments(sig,fs,cont_sil)
#Voice
dur_vad,seg_vad,time_vad = get_segments(sig,fs,cont_vad)
else:
    dur_sil = [0]
    seg_sil = [0]
    time_sil = [[0, 0]]
    dur_vad = [0]
    seg_vad = [0]
    time_vad = [[0, 0]]
X_vad = {'Pause_labels':cont_sil,
'Pause_duration':dur_sil,
'Pause_segments':seg_sil,
'Pause_times':time_sil,
'Speech_labels':cont_vad,
'Speech_duration':dur_vad,
'Speech_segments':seg_vad,
'Speech_times':time_vad}
return X_vad
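# Usage sketch (illustrative): energy VAD on a synthetic tone with a silent
# gap; low-level noise is added so the silent region has finite energy.
def _example_evad():
    fs = 16000
    t = np.arange(fs) / fs
    sig = np.sin(2 * np.pi * 220 * t)
    sig[int(0.4 * fs):int(0.6 * fs)] = 0.0
    sig = sig + 0.001 * np.random.randn(len(sig))
    out = eVAD(sig, fs)
    return out['Speech_duration'], out['Pause_duration']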
def get_segments(sig,fs,segments):
segments[0] = 0
segments[-1:] = 0
yp = segments.copy()
ydf = np.diff(yp)
lim_end = np.where(ydf==-1)[0]+1
lim_ini = np.where(ydf==1)[0]+1
#Silence segments
seg_dur = []#Segment durations
seg_list = []#Segment list
seg_time = []#Time stamps
for idx in range(len(lim_ini)):
#------------------------------------
tini = lim_ini[idx]/fs
tend = lim_end[idx]/fs
seg_dur.append(np.abs(tend-tini))
seg_list.append(sig[lim_ini[idx]:lim_end[idx]])
seg_time.append([tini,tend])
seg_dur = np.asarray(seg_dur)
seg_time = np.vstack(seg_time)
return seg_dur,seg_list,seg_time
#----------------------------------------------------------------------------
def decodef0_transitions(sig,fs,f0,trans,sztr=0.16,step=0.01):
"""
F0 is the pitch contourn
trans = onset or offset
sztr: Size of the transition. Default is 160 ms:80 ms voiced; 80 ms unvoiced
step: The step used to compute the f0 contourn of the signal
"""
if trans.lower()=='onset':
trflag=1
elif trans.lower()=='offset':
trflag=-1
else:
raise ValueError("trans must be either 'onset' or 'offset'")
modf0 = f0.copy()
modf0[modf0>0] = 1
#f0 will be found were modf0!=0
f0diff = np.diff(modf0)
#transitions will be found where f0diff=trflag
idx_tr = np.where(f0diff==trflag)[0]
idx_tr = idx_tr+1#Compensate 1 for the np.diff operation
tm = [] #Time stamps
seg_tr = [] #Onset or Offset segment
winl = int(sztr*fs/2)#Size of the transition in samples
for iseg in idx_tr:
t1 = int(iseg*step*fs-winl)
t2 = int(iseg*step*fs+winl)
seg = sig[t1:t2]
if len(seg)>=int(fs*sztr):
seg_tr.append(seg)
tm.append([t1/fs,t2/fs])
return seg_tr,tm
def f0_features(sig,fs,f0=np.asarray([0]),winTime=0.04,stepTime=0.01):
if (np.sum(f0) == 0) and (len(f0) == 1):
f0 = f0_contour_pr(sig,fs,winTime,stepTime)#F0
#---------------------------------------
#F0 FEATURES
uf0 = np.mean(f0[f0>0])
sf0 = np.std(f0[f0>0])
#F0 in semitones
# ust = Hz2Semitone(uf0)
# sst = Hz2Semitone(sf0)
# feats_f0 = np.hstack([uf0,sf0,ust,sst])
feats_f0 ={'F0_mean':uf0,
'F0_std':sf0}
return feats_f0
def voiced_features(sig,fs,f0,stepTime):
"""
Voiced segment features
"""
vsegs = voiced_seg(sig,fs,f0,stepTime)
#Voiced features
lvoiced = []
for v in vsegs['Voiced_segments']:
lvoiced.append(len(v)/fs)#Length of voiced segment
uvoiced = np.mean(lvoiced)#Average length
vrate = (len(vsegs['Voiced_segments'])*fs)/len(sig)#Voiced segments per second
numv = len(vsegs['Voiced_segments'])
#Rhythm -based
rPVI,nPVI = get_pvi(lvoiced)
pGPI,dGPI = get_gpi(lvoiced,len(sig)/fs) #pGPI = Voiced rate
# feats_voiced = np.hstack([numv,vrate,uvoiced,rPVI,nPVI,pGPI,dGPI])
feats_voiced = {'Voiced_counts':numv,
'Voiced_rate':vrate,
'Voiced_duration':uvoiced,
'Voiced_rPVI':rPVI,
'Voiced_nPVI':nPVI,
'Voiced_dGPI':dGPI}
return feats_voiced,vsegs['Voiced_labels']
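# NOTE: get_pvi and get_gpi are referenced above but are not defined in this
# excerpt. The sketches below are assumptions about the intended behaviour:
# rPVI/nPVI follow the raw and normalized Pairwise Variability Index of
# Grabe & Low, and get_gpi is read as the proportion of time covered by the
# intervals plus the variability (std) of their durations.
def get_pvi(durations):
    d = np.asarray(durations, dtype=float)
    if len(d) < 2:
        return 0.0, 0.0
    diffs = np.abs(np.diff(d))
    rPVI = np.mean(diffs)
    nPVI = 100.0 * np.mean(diffs / ((d[:-1] + d[1:]) / 2.0))
    return rPVI, nPVI
def get_gpi(durations, total_duration):
    d = np.asarray(durations, dtype=float)
    pGPI = np.sum(d) / total_duration  # proportion of time covered
    dGPI = np.std(d)  # variability of the interval durations
    return pGPI, dGPI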
def unvoiced_features(sig,fs,vcont,sil_cont):
"""
Unvoiced segment features.
Requires voiced and silence/pauses segment detection.
"""
#Unvoiced features
uv_seg,_,_ = unvoiced_seg(sig,fs,vcont,sil_cont)
lunvoiced = []
for uv in uv_seg:
lunvoiced.append(len(uv)/fs)#Length of unvoiced segment
uunvoiced = np.mean(lunvoiced)#Average length
# sunvoiced = np.std(lunvoiced)#variation of length
uvrate = (len(uv_seg)*fs)/len(sig)#Unvoiced segments per second
numuv = len(uv_seg)
rPVI,nPVI = get_pvi(lunvoiced)
pGPI,dGPI = get_gpi(lunvoiced,len(sig)/fs)
# feats_unvoiced = np.hstack([numuv,uvrate,uunvoiced,rPVI,nPVI,pGPI,dGPI])
feats_unvoiced = {'Unvoiced_counts':numuv,
'Unvoiced_rate':uvrate,
'Unvoiced_duration':uunvoiced,
'Unvoiced_rPVI':rPVI,
'Unvoiced_nPVI':nPVI,
'Unvoiced_dGPI':dGPI}
return feats_unvoiced
def VAD_features(sig,fs,out_VAD,win_time=0.025,step_time=0.01):
npause,rpause,dpause = duration_features(sig,fs,out_VAD['Pause_duration'],out_VAD['Pause_segments'])
nspeech,rspeech,dspeech = duration_features(sig,fs,out_VAD['Speech_duration'],out_VAD['Speech_segments'])
#Compute energy based features only for speech segments
mSPL_vad,sSPL = VAD_energy_features(sig,fs,out_VAD['Speech_segments'],win_time,step_time)
feats_vad ={'Pause_counts':npause,
'Pause_rate':rpause,
'Pause_duration':dpause,
'Speech_counts':nspeech,
'Speech_rate':rspeech,
'Speech_duration':dspeech,
'SPL_mean':mSPL_vad,
'SPL_std':sSPL}
return feats_vad
def duration_features(sig,fs,dsegment,segment):
#Number of pauses, Duration of pauses, pauses per second
dsegm = np.mean(dsegment)
rsegm = (len(segment)*fs)/len(sig)
nsegm = len(segment)
return nsegm,rsegm,dsegm
def VAD_energy_features(sig,fs,seg_vad,win_time=0.025,step_time=0.01):
"""
The SPL should be only computed for the speech segments
Parameters
----------
sig : ndarray
    Speech signal.
fs : int
    Sampling frequency.
seg_vad : list
    Speech segments detected by the VAD.
win_time : float, optional
    Window length in seconds. The default is 0.025.
step_time : float, optional
    Step size in seconds. The default is 0.01.
Returns
-------
mSPL_vad : float
    Mean sound pressure level over the speech segments.
sSPL : float
    Standard deviation of the SPL across segments.
"""
SPL_vad = []
for ivad in seg_vad:
SPL = sound_pressure_level(ivad,fs,win_time,step_time)
SPL_vad.append(np.mean(SPL))
mSPL_vad = np.mean(SPL_vad)
sSPL = np.std(SPL_vad)
return mSPL_vad,sSPL
def sound_pressure_level(sig,fs,win_time=0.025,step_time=0.01):
"""
Sound Pressure Level as in:
<NAME>, <NAME>. Tutorial and Guidelines on Measurement of Sound
Pressure Level in Voice and Speech. Journal of Speech, Language, and Hearing Research.
2018 Mar 15;61(3):441-461. doi: 10.1044/2017_JSLHR-S-17-0095. PMID: 29450495.
SPL = 20*log10(p/p0)
20xlog refers to a root-power quantity e.g., volts, sound pressure, current...
Intensity in dBs:
ene = 10*log10(sum(x^2)/N)
10xlog refers to a power quantity, i.e. quantities directly proportional to power
x: speech signal
N: lenght of x
p = RMS value of x
p0 = 20uPA = 0.00002 Hearing threshold
"""
#Set a threshold based on the energy of the signal
if len(sig)>3*int(win_time*fs):
frames = sg.extract_windows(sig,int(win_time*fs),int(step_time*fs))
else:
frames = list([sig])
SPL = []#Sound Pressure Level
p0 = 2*(10**-5)#Hearing threshold at SLP 0dB
for x in frames:
#Sound Pressure Level (dBs)
p = np.sqrt(np.sum((x)**2)/len(x))
Lp = 20*np.log10(p/p0)
SPL.append(Lp)
SPL = np.asarray(SPL)
return SPL
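# Usage sketch (illustrative): SPL contour of a 1-second, 440 Hz tone. The
# absolute level depends on the (unit-less) amplitude scale of the signal.
def _example_spl():
    fs = 16000
    t = np.arange(fs) / fs
    sig = 0.1 * np.sin(2 * np.pi * 440 * t)
    return sound_pressure_level(sig, fs)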
def ppq(f0,pq=2):
"""
<NAME>., & <NAME>. (2016). Algorithm for jitter and shimmer
measurement in pathologic voices. Procedia Computer Science, 100, 271-279.
f0: Fundamental frequency contour
pq: Number of points to be considered
pq = 2 : Jitter
pq = 3 : Relative Average Perturbation
pq = 5 : PPQ computed every 5 points of f0
"""
#Non zero f0
f0 = f0[f0>0]
N = len(f0)
ppq = []
start = int(np.floor(pq/2))
for i in range(start,N):
# ppq.append(np.abs(f0[i]-Mp))
if pq>1:
neig = np.mean(f0[i-start:i+(pq-start)])
else:
neig = f0[i-1]
ppq.append(np.abs(f0[i]-neig))
ppq = np.sum(np.asarray(ppq))/(N-1)
ppq = (100*ppq)/np.mean(f0)
return ppq
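# Usage sketch (illustrative): jitter of a synthetic pitch contour with small
# random perturbations around 120 Hz.
def _example_ppq():
    f0 = 120.0 + np.random.randn(200)
    return ppq(f0, pq=3)  # relative average perturbation, in percent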
#########################################################################
def apq(ak,pq=2):
"""
<NAME>., & <NAME>. (2016). Algorithm for jitter and shimmer
measurement in pathologic voices. Procedia Computer Science, 100, 271-279.
ak: Maximum amplitude of the signal
pq: Number of points to be considered
pq=3 : Shimmer
pq=5 : APQ computed every 5 points
"""
# ak = np.zeros(frames.shape[0])
# for ie in range(len(ak)):
# ak[ie] = np.max(frames[ie])
N = len(ak)
#Max F0
# Ma = np.max(np.abs(ak))
apq = []
start = int(np.floor(pq/2))
for i in range(start,N):
if pq>1:
neig = np.mean(ak[i-start:i+(pq-start)])
else:
neig = ak[i-1]
apq.append(np.absolute(ak[i]-neig))
apq = np.sum(np.asarray(apq))/(N-1)
#Expressed as a percentage of the mean amplitude, mirroring ppq above (assumed completion)
apq = (100*apq)/np.mean(ak)
return apq
import os
import sys
import numpy as np
# import BoundaryDetector from lib directory
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from lib.BoundaryDetector import BoundaryDetector
# generate expected map
expected_sym_map = np.ones((100,100),dtype=np.bool_)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.one_hot_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class OneHotTest(tf.test.TestCase):
def _testOneHot(self, truth, use_gpu=False, expected_err_re=None,
raises=None, **inputs):
with self.test_session(use_gpu=use_gpu):
if raises is not None:
with self.assertRaises(raises):
tf.one_hot(**inputs)
else:
ans = tf.one_hot(**inputs)
if expected_err_re is None:
tf_ans = ans.eval()
self.assertAllEqual(tf_ans, truth)
self.assertEqual(tf_ans.shape, ans.get_shape())
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval()
def _testBothOneHot(self, truth, expected_err_re=None, raises=None, **inputs):
self._testOneHot(truth, True, expected_err_re, raises, **inputs)
self._testOneHot(truth, False, expected_err_re, raises, **inputs)
def _testBasic(self, dtype):
indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from copy import deepcopy
import numpy as np
from numpy.testing import assert_allclose
import pytest
from jax import random, test_util
import numpyro
from numpyro import handlers
from numpyro.contrib.module import (
ParamShape,
_update_params,
flax_module,
haiku_module,
random_flax_module,
random_haiku_module
)
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
def haiku_model(x, y):
import haiku as hk
linear_module = hk.transform(lambda x: hk.Linear(100)(x))
nn = haiku_module("nn", linear_module, input_shape=(100,))
mean = nn(x)
numpyro.sample("y", numpyro.distributions.Normal(mean, 0.1), obs=y)
def flax_model(x, y):
import flax
linear_module = flax.nn.Dense.partial(features=100)
nn = flax_module("nn", linear_module, input_shape=(100,))
mean = nn(x)
numpyro.sample("y", numpyro.distributions.Normal(mean, 0.1), obs=y)
def test_flax_module():
X = np.arange(100)
Y = 2 * X + 2
with handlers.trace() as flax_tr, handlers.seed(rng_seed=1):
flax_model(X, Y)
assert flax_tr["nn$params"]['value']['kernel'].shape == (100, 100)
assert flax_tr["nn$params"]['value']['bias'].shape == (100,)
def test_haiku_module():
X = np.arange(100)
Y = 2 * X + 2
with handlers.trace() as haiku_tr, handlers.seed(rng_seed=1):
haiku_model(X, Y)
assert haiku_tr["nn$params"]['value']['linear']['w'].shape == (100, 100)
assert haiku_tr["nn$params"]['value']['linear']['b'].shape == (100,)
def test_update_params():
params = {'a': {'b': {'c': {'d': 1}, 'e': np.array(2)}, 'f': np.ones(4)}}
"""
This file: animation or figure plot of the relative position and yaw based on real-world data
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from dataCreate import realData
from relativeEKF import EKFonRealData
import transform
# system settings
show_animation = True
border = {"xmin":-4, "xmax":4, "ymin":-4, "ymax":4, "zmin":0, "zmax":4}
numRob = 3 # number of robots
dt = 0.01 # time interval [s]
relativeState = np.zeros((3, numRob, numRob))
dataFromReal = realData("./dataset/dat02.csv", numRob)
uList, zList, GtList, simTime = dataFromReal.readDataTolist()
relaEKFonRealData = EKFonRealData(10, 1, 0.25, 0.1, 0.1, numRob)
def animate(step):
global relativeState
xTrue, zNois, uNois = dataFromReal.calcInputDataset(uList[step], zList[step], GtList[step])
relativeState = relaEKFonRealData.EKF(uNois, zNois, relativeState)
xEsti = transform.calcAbsPosUseRelaPosWRTRob0(xTrue[:,0], relativeState, xTrue, numRob)
pointsTrue.set_data(xTrue[0, :], xTrue[1, :])
pointsEsti.set_data(xEsti[0, :], xEsti[1, :])
#Heading marker: x uses cos; the matching y term with sin is an assumed completion
pointsTrueHead.set_data(xTrue[0, :]+0.07*np.cos(xTrue[2, :]), xTrue[1, :]+0.07*np.sin(xTrue[2, :]))
# @version: 1.0 date: 05/06/2015 by <NAME>
# @author: <EMAIL>, <EMAIL>, <EMAIL>
# @copyright: EPFL-IC-LCAV 2015
from __future__ import division
import numpy as np
import scipy.linalg as la
from .parameters import constants
from . import utilities as u
from .soundsource import build_rir_matrix
from . import windows
from . import stft
#=========================================================================
# Free (non-class-member) functions related to beamformer design
#=========================================================================
def H(A, **kwargs):
'''Returns the conjugate (Hermitian) transpose of a matrix.'''
return np.transpose(A, **kwargs).conj()
def sumcols(A):
'''
Sums the columns of a matrix (np.array).
The output is a 2D np.array
of dimensions M x 1.
'''
return np.sum(A, axis=1, keepdims=1)
def mdot(*args):
'''Left-to-right associative matrix multiplication of multiple 2D ndarrays.'''
ret = args[0]
for a in args[1:]:
ret = np.dot(ret, a)
return ret
def distance(x, y):
'''
Computes the distance matrix E.
E[i,j] = sqrt(sum((x[:,i]-y[:,j])**2)).
x and y are DxN ndarray containing N D-dimensional vectors.
'''
# Assume x, y are arrays, *not* matrices
x = np.array(x)
y = np.array(y)
# return np.sqrt((x[0,:,np.newaxis]-y[0,:])**2 + (x[1,:,np.newaxis]-y[1,:])**2)
return np.sqrt(np.sum((x[:, :, np.newaxis] - y[:, np.newaxis, :])**2, axis=0))
def unit_vec2D(phi):
return np.array([[np.cos(phi), np.sin(phi)]]).T
def linear_2D_array(center, M, phi, d):
'''
Creates an array of uniformly spaced linear points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points
phi: float
The counterclockwise rotation of the array (from the x-axis)
d: float
The distance between neighboring points
Returns
-------
ndarray (2, M)
The array of points
'''
u = unit_vec2D(phi)
return np.array(center)[:, np.newaxis] + d * \
(np.arange(M)[np.newaxis, :] - (M - 1.) / 2.) * u
def circular_2D_array(center, M, phi0, radius):
'''
Creates an array of uniformly spaced circular points in 2D
Parameters
----------
center: array_like
The center of the array
M: int
The number of points
phi0: float
The counterclockwise rotation of the first element in the array (from the x-axis)
radius: float
The radius of the array
Returns
-------
ndarray (2, M)
The array of points
'''
phi = np.arange(M) * 2. * np.pi / M
return np.array(center)[:, np.newaxis] + radius * \
np.vstack((np.cos(phi + phi0), np.sin(phi + phi0)))
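# Usage sketch (illustrative): an 8-microphone linear array with 5 cm spacing
# and an 8-microphone circular array of radius 10 cm, both centred at the
# origin; both return arrays of shape (2, 8).
def _example_array_geometries():
    linear = linear_2D_array(center=[0, 0], M=8, phi=0.0, d=0.05)
    circular = circular_2D_array(center=[0, 0], M=8, phi0=0.0, radius=0.10)
    return linear.shape, circular.shape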
def poisson_2D_array(center, M, d):
'''
Create array of 2D positions drawn from Poisson process.
Parameters
----------
center: array_like
The center of the array
M: int
    The number of points
d: float
    The mean distance between neighboring points
Returns
-------
ndarray (2, M)
    The array of points
'''
from numpy.random import standard_exponential, randint
R = d*standard_exponential((2, M))
import numpy as np
class Board:
def __init__(self, size = (3, 3), array = None):
# if array == None:
if array is None:
self.board = np.zeros(size, dtype=np.int8)
else:
self.board = np.array(array, dtype=np.int8)
self.x_size = self.board.shape[0]
self.y_size = self.board.shape[1]
self.player_who_won = None
def move(self, x, y, current_player):
self.board[x, y] = current_player
def are_same_and_non_zero(self, array):
return np.unique(array).size == 1 and array[0] != 0
def is_board_full(self):
return not np.any(np.unique(self.board) == 0)  # full when no empty (0) cells remain
"""
Definition of a set of Numpy Helper classes.
(c) 2020 d373c7
"""
import unittest
import numpy as np
import d373c7.engines as en
import d373c7.features as ft
FILES_DIR = './files/'
class TestCreation(unittest.TestCase):
def test_creation_base(self):
x = np.arange(10)
y = np.arange(10)
c = [x, y]
n = en.NumpyList(c)
self.assertIsInstance(n, en.NumpyList)
self.assertEqual(len(n), len(x), f'Length not correct {len(n)}/{len(x)}')
self.assertEqual(len(n.shapes[0]), 1, f'Shape should only have 1 dim {len(n.shapes[0])}')
self.assertEqual(n.shapes[0][0], len(x), f'Shape of dim 0 incorrect {n.shapes[0][0]}')
self.assertEqual(len(n.shapes[1]), 1, f'Shape should only have 1 dim {len(n.shapes[1])}')
self.assertEqual(n.shapes[1][0], len(y), f'Shape of dim 0 incorrect {n.shapes[1][0]}')
self.assertEqual(n.number_of_lists, len(c), f'Number of lists incorrect {n.number_of_lists}')
self.assertEqual(n.dtype_names[0], x.dtype.name, f'dtype not expected {n.dtype_names[0]}')
self.assertEqual(n.dtype_names[1], y.dtype.name, f'dtype not expected {n.dtype_names[1]}')
self.assertListEqual(n.lists, c, f'Not the expected return from numpy_list {n.lists}')
def test_creation_wrong_size(self):
x = np.random.rand(5, 2)
y = np.random.rand(2, 2)
with self.assertRaises(en.NumpyListException):
en.NumpyList([x, y])
def test_lists(self):
x = np.random.rand(5, 2)
y = np.random.rand(5, 2)
c = [x, y]
n = en.NumpyList(c)
self.assertEqual(len(n.lists), len(c), f'Number of lists does not add up {len(n.lists)}')
self.assertEqual((n.lists[0] == x).all(), True, f'Lists not equal')
self.assertEqual((n.lists[1] == y).all(), True, f'Lists not equal')
def test_slice_good(self):
x = np.random.rand(5, 2)
y = np.random.rand(5, 2)
c = [x, y]
n = en.NumpyList(c)
x0, y0 = n[0].lists
self.assertEqual(np.array(x0 == x[0]).all(), True, f'First entries do not match {x0}, {x[0]}')
self.assertEqual(np.array(y0 == y[0]).all(), True, f'First entries do not match {y0}, {y[0]}')
x1, y1 = n[1].lists
self.assertEqual(np.array(x1 == x[1]).all(), True, f'Second entries do not match {x1}, {x[1]}')
self.assertEqual(np.array(y1 == y[1]).all(), True, f'Second entries do not match {y1}, {y[1]}')
xf, yf = n[0:5].lists
self.assertEqual(np.array(xf == x).all(), True, f'All entries do not match {xf}, {x}')
self.assertEqual(np.array(yf == y).all(), True, f'All entries do not match {yf}, {y}')
xm, ym = n[1:4].lists
self.assertEqual(np.array(xm == x[1:4]).all(), True, f'Mid entries do not match {xf}, {x[1:4]}')
self.assertEqual(np.array(ym == y[1:4]).all(), True, f'Mid entries do not match {yf}, {y[1:4]}')
xl, yl = n[4].lists
self.assertEqual(np.array(xl == x[-1]).all(), True, f'Last entries do not match {xl}, {x[-1]}')
self.assertEqual(np.array(yl == y[-1]).all(), True, f'Last entries do not match {yl}, {y[-1]}')
xl, yl = n[-1].lists
self.assertEqual(np.array(xl == x[-1]).all(), True, f'Last entries do not match {xl}, {x[-1]}')
import logging
import random
from abc import ABC
from typing import List
import numpy as np
import scipy.stats
from network import physical_network
from experiment_utils.Order import Order
class OrderGenerator(ABC):
# Generates a set of locations for the next timestep.
def generate_orders(self, current_t: int) -> List[Order]:
pass
class NaiveOrderGenerator(OrderGenerator):
default_delivery_time = 1
def __init__(self, num_dcs, num_customers, orders_per_day):
self.num_dcs = num_dcs
self.num_customers = num_customers
self.orders_per_day = orders_per_day
def generate_orders(self): # TODO: needs a list of commodities, also needs the
customer = "c_" + str(np.random.choice(np.arange(self.num_customers)))
dc = "dc_" + str(np.random.choice(np.arange(self.num_dcs)))
demand = random.randint(0, 50)
return [
Order(demand, dc, customer, self.default_delivery_time)
for it in range(self.orders_per_day)
]
class ActualOrderGenerator(OrderGenerator):
"""
The original generator: independent demand means for each product/customer pair.
"""
network: physical_network
orders_per_day: int
def __init__(self, network: physical_network, orders_per_day):
self.network = network
self.orders_per_day = orders_per_day
def generate_orders(self, current_t) -> List[Order]:
return self._generate_orders(self.orders_per_day, current_t)
def _generate_orders(
self, orders_per_day: int, current_t
): # TODO test and validate.
# Choose customers to generate orders with OUT replacement, orders per day must be <= customers
chosen_customers = np.random.choice(
np.arange(self.network.num_customers), size=orders_per_day, replace=False
)
order_means = self.network.customer_means[chosen_customers]
demand = np.floor(
np.random.multivariate_normal(
order_means,
np.eye(orders_per_day) * self.network.demand_var,
size=self.network.num_commodities,
)
) # shape (num_commodities,num_orders)
if (demand < 0).any():
logging.info("Customer means that caused negatives")
logging.info(order_means)
# raise Exception("Generated a negative order")
demand = np.abs(demand)
# Create order objects
orders = []
for ci in range(len(chosen_customers)):
order_demand_vector = demand[:, ci]
_chosen_customer = chosen_customers[ci]
customer_node = self.network.customers[_chosen_customer]
chosen_initial_point = np.random.choice(
np.argwhere(self.network.dcs_per_customer_array[ci, :]).reshape(-1)
)
initial_point_physical_node = self.network.dcs[chosen_initial_point]
time = (
current_t + self.network.planning_horizon - 1
) # Orders appear on the edge of PH.
orders.append(
Order(
order_demand_vector,
initial_point_physical_node,
customer_node,
time,
name=f"oc_{customer_node.node_id}:{time}",
)
)
return orders
class BiasedOrderGenerator(OrderGenerator):
"""
# Biased generator: demand is more skewed and products are correlated.
"""
network: physical_network
orders_per_day: int
customer_means: np.array
pz_numerator: float # this is a test
def __init__(self, network: physical_network, orders_per_day, pz_numerator=1.0):
self.network = network
self.orders_per_day = orders_per_day
self.customer_covariances = (
self._generate_customer_covariances()
) # shape:(C,K,K)
self.customer_means = self._generate_customer_means()
self.pz_numerator = pz_numerator
def _generate_customer_covariances(self):
"""
Returns: A covariance matrix with shape (num_customers,K,K)
"""
K = self.network.num_commodities
num_customers = self.network.num_customers
return (
scipy.stats.invwishart(K, np.ones(K))
.rvs(size=num_customers)
.reshape(num_customers, K, K)
)
def _generate_customer_means(self):
# total_demand_mean = self.network.demand_mean * self.network.num_customers * self.network.num_commodities
return np.random.poisson(
self.network.demand_mean / self.network.num_commodities,
size=self.network.num_commodities,
)
# return np.floor(
# np.random.dirichlet(self.network.num_commodities / np.arange(1, self.network.num_commodities + 1),
# size=1) * total_demand_mean).reshape(-1) + self.network.demand_mean # shape (commodities)
def generate_orders(self, current_t) -> List[Order]:
# todo params
chosen_customers = np.random.choice(
np.arange(self.network.num_customers),
size=self.orders_per_day,
replace=False,
)
order_means = self.network.customer_means[
chosen_customers
] # getting the means from the network but the covariances from here for legacy reasons.
K = self.network.num_commodities
####
# Per-customer covariance matrices come from an inverse-Wishart distribution
# (a high-dimensional analogue of the inverse chi-squared, a standard prior
# over covariance matrices); they were sampled in _generate_customer_covariances.
orders = []
for ci in range(len(chosen_customers)):
means = self.customer_means
covar = self.customer_covariances[ci, :, :]
# Sampling X from a multivariate normal with the covariance from Wishart.
multivariate_normal_x = np.random.multivariate_normal(
np.zeros(means.shape), covar, size=1
)
# Map the samples to uniform quantiles through the marginal normal CDFs (std = sqrt of the covariance diagonal), i.e. a Gaussian copula.
px = scipy.stats.norm(0, np.sqrt(np.diagonal(covar))).cdf(
multivariate_normal_x
)
# Take those quantiles and plug them into a geometric. This is going to skew the data and project it into the range that we want starting at 0.
# qgeom-style: push the uniform quantiles through a geometric quantile function to obtain skewed, non-negative integer demands; the pz*(1-pz) parameterization ties the success probability to the target mean.
# pz = 1 / means
# TODO just to check if means are impacting in any way
pz = self.pz_numerator / means
order_demand = scipy.stats.geom(p=pz * (1 - pz)).ppf(px).flatten()
_chosen_customer = chosen_customers[ci]
customer_node = self.network.customers[_chosen_customer]
chosen_initial_point = np.random.choice(
np.argwhere(self.network.dcs_per_customer_array[ci, :]).reshape(-1)
)
initial_point_physical_node = self.network.dcs[chosen_initial_point]
time = (
current_t + self.network.planning_horizon - 1
) # Orders appear on the edge of PH.
orders.append(
Order(
order_demand,
initial_point_physical_node,
customer_node,
time,
name=f"oc_{customer_node.node_id}:{time}",
)
)
return orders
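# A standalone sketch (mine, not part of the generator API) of the Gaussian-copula
# trick used in generate_orders above: sample correlated normals, map each margin
# to a quantile through its own normal CDF, then push the quantiles through a
# geometric inverse CDF. The result is skewed, non-negative demand that keeps the
# normal's correlation structure. `means` and `covar` are placeholder inputs, and
# the pz * (1 - pz) success probability mirrors the code above rather than a
# derived formula.
def _sample_correlated_geometric(means, covar, pz_numerator=1.0):
    x = np.random.multivariate_normal(np.zeros(len(means)), covar)
    quantiles = scipy.stats.norm(0, np.sqrt(np.diagonal(covar))).cdf(x)
    pz = pz_numerator / np.asarray(means)
    return scipy.stats.geom(p=pz * (1 - pz)).ppf(quantiles)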
class NormalOrderGenerator(BiasedOrderGenerator):
"""
    A makeshift multivariate-normal generator (by Javier) intended to reduce demand variance.
"""
def __init__(self, network: physical_network, orders_per_day):
super(NormalOrderGenerator, self).__init__(network, orders_per_day, 1.0)
def generate_orders(self, current_t):
# todo params
chosen_customers = np.random.choice(
np.arange(self.network.num_customers),
size=self.orders_per_day,
replace=False,
)
order_means = self.network.customer_means[
chosen_customers
] # getting the means from the network but the covariances from here for legacy reasons.
K = self.network.num_commodities
####
# Generating covariance matrix with inverse Wishart distribution. What does that parameter do?
# Like Chi^2 but high dimensional, for generating covariance matrices.
covar = scipy.stats.invwishart(K, np.ones(K)).rvs(size=1)
orders = []
for ci in range(len(chosen_customers)):
means = self.customer_means
covar = self.customer_covariances[ci, :, :] * self.network.demand_var
# Round down the ints and add 1 to avoid zero demands.
order_demand = (
np.random.multivariate_normal(means, covar).astype(int) + 1
).astype(float)
order_demand = np.where(order_demand < 1.0, 1.0, order_demand)
_chosen_customer = chosen_customers[ci]
customer_node = self.network.customers[_chosen_customer]
chosen_initial_point = np.random.choice(
                np.argwhere(self.network.dcs_per_customer_array[ci, :]).reshape(-1)
            )
            initial_point_physical_node = self.network.dcs[chosen_initial_point]
            time = (
                current_t + self.network.planning_horizon - 1
            )  # Orders appear on the edge of PH.
            orders.append(
                Order(
                    order_demand,
                    initial_point_physical_node,
                    customer_node,
                    time,
                    name=f"oc_{customer_node.node_id}:{time}",
                )
            )
        return orders
import nengo
import numpy as np
# this is a dummy environment with a simple sensor and motor
class ExampleIO:
def __init__(self):
self.data = 0.0
def sensor(self):
        return self.data + np.random.normal(0, 0.1)
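if __name__ == "__main__":
    # Minimal usage sketch (assumes nengo is installed; ExampleIO as above):
    # a nengo Node polls the noisy sensor each timestep and feeds an ensemble.
    io = ExampleIO()
    model = nengo.Network(label="dummy sensor demo")
    with model:
        sensor_node = nengo.Node(lambda t: io.sensor())   # read the sensor
        ens = nengo.Ensemble(n_neurons=50, dimensions=1)  # represent the reading
        nengo.Connection(sensor_node, ens)
    with nengo.Simulator(model) as sim:
        sim.run(0.1)  # simulate 100 ms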
"""
<NAME> (<EMAIL>)
Class to define the Dataset object.
"""
from PIL import Image
import os
import numpy as np
import scipy.io
import pandas as pd
class Dataset:
def __init__(self, train_df, test_df, val_df, database_root, number_of_slices, store_memory=True):
"""Initialize the Dataset object
Args:
train_df (dataframe): Training dataframe from TrainTestSplit.split
test_df (dataframe): Testing dataframe from TrainTestSplit.splt
val_df (dataframe): Validation dataframe from TrainTestSplit.split
database_root (str): db root from config
number_of_slices (int): Number of slices per group
store_memory (bool, optional): Memory management argument. Defaults to True.
"""
# for idx, row in train_df.iterrows():
# print(type(row))
# print(row)
# #print("Images volumes, {}".format(row.iloc[i*3]))
# # #scipy 1.2.3
self.images_train = []
self.images_train_path = []
self.labels_train = []
self.labels_train_path = []
self.labels_liver_train = []
self.labels_liver_train_path = []
if train_df is not None:
train_df = pd.read_csv(train_df, delim_whitespace = True) if isinstance(train_df, str) else train_df
for idx, row in train_df.iterrows():
if (len(row) > 3):
if store_memory:
aux_images_train = []
aux_labels_train = []
aux_labels_liver_train = []
for i in range(number_of_slices):
mat_file = os.path.join(database_root, str(row.iloc[i * 3]))
aux_images_train.append(np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32))
self.images_train.append(np.array(aux_images_train))
for i in range(number_of_slices):
mat_file = os.path.join(database_root, str(row.iloc[i * 3 + 1]))
aux_labels_train.append(np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32))
self.labels_train.append(np.array(aux_labels_train))
for i in range(number_of_slices):
mat_file = os.path.join(database_root, str(row.iloc[i * 3 + 2]))
aux_labels_liver_train.append(np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32))
self.labels_liver_train.append(np.array(aux_labels_liver_train))
if (idx + 1) % 1000 == 0:
print('Loaded ' + str(idx) + ' train images')
aux_images_train_path = []
aux_labels_train_path = []
aux_labels_liver_train_path = []
for i in range(number_of_slices):
aux_images_train_path.append(os.path.join(database_root, str(row.iloc[i * 3])))
self.images_train_path.append(np.array(aux_images_train_path))
for i in range(number_of_slices):
aux_labels_train_path.append(os.path.join(database_root, str(row.iloc[i * 3 + 1])))
self.labels_train_path.append(np.array(aux_labels_train_path))
for i in range(number_of_slices):
aux_labels_liver_train_path.append(os.path.join(database_root, str(row.iloc[i * 3 + 2])))
self.labels_liver_train_path.append(np.array(aux_labels_liver_train_path))
self.images_train_path = np.array(self.images_train_path)
self.labels_train_path = np.array(self.labels_train_path)
self.labels_liver_train_path = np.array(self.labels_liver_train_path)
# Load testing images (path) and labels
self.images_test = []
self.images_test_path = []
if test_df is not None:
test_df = pd.read_csv(test_df, delim_whitespace = True) if isinstance(test_df, str) else test_df
for idx, row in test_df.iterrows():
if (len(row) > 1):
if store_memory:
aux_images_test = []
for i in range(number_of_slices):
mat_file = os.path.join(database_root, str(row.iloc[i * 3])) # os.path.join(database_root, str(line.split()[i * 3]))
aux_images_test.append(
np.array(scipy.io.loadmat(mat_file)['section'],
dtype=np.float32))
self.images_test.append(np.array(aux_images_test))
if (idx + 1) % 1000 == 0:
print('Loaded ' + str(idx) + ' test images')
aux_images_test_path = []
for i in range(number_of_slices):
mat_file = os.path.join(database_root, str(row.iloc[i * 3]))
aux_images_test_path.append(mat_file)
self.images_test_path.append(np.array(aux_images_test_path))
self.images_val = []
self.images_val_path = []
self.labels_val = []
self.labels_val_path = []
self.labels_liver_val = []
self.labels_liver_val_path = []
if val_df is not None:
val_df = pd.read_csv(val_df, delim_whitespace = True) if isinstance(val_df, str) else val_df
for idx, row in val_df.iterrows():
if (len(row) > 3):
if store_memory:
aux_images_val = []
aux_labels_val = []
aux_labels_liver_val = []
for i in range(number_of_slices):
mat_file = os.path.join(database_root, str(row.iloc[i * 3]))
aux_images_val.append(
np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32))
self.images_val.append(np.array(aux_images_val))
for i in range(number_of_slices):
mat_file = os.path.join(database_root, str(row.iloc[i * 3 + 1]))
                        aux_labels_val.append(
np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32))
self.labels_val.append(np.array(aux_labels_val))
for i in range(number_of_slices):
mat_file = os.path.join(database_root, str(row.iloc[i * 3 + 2]))
                        aux_labels_liver_val.append(
np.array(scipy.io.loadmat(mat_file)['section'], dtype=np.float32))
self.labels_liver_val.append(np.array(aux_labels_liver_val))
if (idx + 1) % 1000 == 0:
                        print('Loaded ' + str(idx) + ' val images')
aux_images_val_path = []
aux_labels_val_path = []
aux_labels_liver_val_path = []
for i in range(number_of_slices):
aux_images_val_path.append(os.path.join(database_root, str(row.iloc[i * 3])))
self.images_val_path.append(np.array(aux_images_val_path))
for i in range(number_of_slices):
aux_labels_val_path.append(os.path.join(database_root, str(row.iloc[i * 3 + 1])))
self.labels_val_path.append(np.array(aux_labels_val_path))
for i in range(number_of_slices):
aux_labels_liver_val_path.append(os.path.join(database_root, str(row.iloc[i * 3 + 2])))
self.labels_liver_val_path.append(np.array(aux_labels_liver_val_path))
self.images_val_path = np.array(self.images_val_path)
self.labels_val_path = np.array(self.labels_val_path)
self.labels_liver_val_path = np.array(self.labels_liver_val_path)
print('Done initializing Dataset')
# Init parameters
self.train_ptr = 0
self.test_ptr = 0
self.val_ptr = 0
self.train_size = len(self.images_train_path)
self.test_size = len(self.images_test_path)
self.val_size = len(self.images_val_path)
self.train_idx = np.arange(self.train_size)
        self.val_idx = np.arange(self.val_size)
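    def next_batch(self, batch_size):
        """Hedged sketch: the original batching method is not shown here, but the
        pointer/index fields above suggest an API like this one for the in-memory
        case (store_memory=True). Treat it as an illustration, not original code."""
        if self.train_ptr + batch_size <= self.train_size:
            idx = self.train_idx[self.train_ptr:self.train_ptr + batch_size]
            self.train_ptr += batch_size
        else:
            # Wrap around: reshuffle and start a new epoch.
            np.random.shuffle(self.train_idx)
            idx = self.train_idx[:batch_size]
            self.train_ptr = batch_size
        images = [self.images_train[i] for i in idx]
        labels = [self.labels_train[i] for i in idx]
        return images, labels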
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module contains an implementation of the symmetry functions used in the Parkhill paper https://arxiv.org/pdf/1711.06385.pdf.
This implementation is different. It works for both data sets where all the molecules are the same but in different configurations and
for datasets with all different molecules.
Note: it is all in single precision.
"""
import tensorflow as tf
import numpy as np
def acsf_rad(xyzs, Zs, radial_cutoff, radial_rs, eta):
"""
This does the radial part of the symmetry function (G2 function in Behler's papers). It works only for datasets where
all samples are the same molecule but in different configurations.
:param xyzs: tf tensor of shape (n_samples, n_atoms, 3) contaning the coordinates of each atom in each data sample
:param Zs: tf tensor of shape (n_samples, n_atoms) containing the atomic number of each atom in each data sample
:param radial_cutoff: scalar tensor
:param radial_rs: tf tensor of shape (n_rs,) with the R_s values
:param eta: tf scalar
:return: tf tensor of shape (n_samples, n_atoms, n_atoms, n_rs)
"""
# Calculating the distance matrix between the atoms of each sample
with tf.name_scope("Distances"):
dxyzs = tf.expand_dims(xyzs, axis=2) - tf.expand_dims(xyzs, axis=1)
dist_tensor = tf.cast(tf.norm(dxyzs, axis=3), dtype=tf.float32) # (n_samples, n_atoms, n_atoms)
# Indices of terms that need to be zero (diagonal elements)
mask_0 = tf.zeros(tf.shape(dist_tensor))
mask_1 = tf.ones(tf.shape(Zs))
where_eq_idx = tf.cast(tf.matrix_set_diag(mask_0, mask_1), dtype=tf.bool)
# Calculating the exponential term
with tf.name_scope("Exponential_term"):
expanded_rs = tf.expand_dims(tf.expand_dims(tf.expand_dims(radial_rs, axis=0), axis=0), axis=0) # (1, 1, 1, n_rs)
expanded_dist = tf.expand_dims(dist_tensor, axis=-1) # (n_samples, n_atoms, n_atoms, 1)
exponent = - eta * tf.square(tf.subtract(expanded_dist, expanded_rs))
exp_term = tf.exp(exponent) # (n_samples, n_atoms, n_atoms, n_rs)
# Calculating the fc terms
with tf.name_scope("fc_term"):
# Finding where the distances are less than the cutoff
where_less_cutoff = tf.less(dist_tensor, radial_cutoff)
# Calculating all of the fc function terms
fc = 0.5 * (tf.cos(3.14159265359 * dist_tensor / radial_cutoff) + 1.0)
# Setting to zero the terms where the distance is larger than the cutoff
zeros = tf.zeros(tf.shape(dist_tensor), dtype=tf.float32)
cut_off_fc = tf.where(where_less_cutoff, fc, zeros) # (n_samples, n_atoms, n_atoms)
# Cleaning up diagonal terms
clean_fc_term = tf.where(where_eq_idx, zeros, cut_off_fc)
# Cleaning up dummy atoms terms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
cleaner_fc_term = tf.where(dummy_mask, clean_fc_term, zeros)
# Multiplying exponential and fc terms
expanded_fc = tf.expand_dims(cleaner_fc_term, axis=-1) # (n_samples, n_atoms, n_atoms, 1)
with tf.name_scope("Rad_term"):
presum_term = tf.multiply(expanded_fc, exp_term) # (n_samples, n_atoms, n_atoms, n_rs)
return presum_term
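# A small numpy reference (my sketch, not part of the original module) of the same
# G2 radial presum term for a single sample with no dummy atoms. Useful for
# cross-checking the TF graph above on toy inputs.
def _acsf_rad_np(xyz, radial_cutoff, radial_rs, eta):
    """xyz: (n_atoms, 3) array; returns (n_atoms, n_atoms, n_rs)."""
    d = np.linalg.norm(xyz[:, None, :] - xyz[None, :, :], axis=-1)
    fc = 0.5 * (np.cos(np.pi * d / radial_cutoff) + 1.0)
    fc[d >= radial_cutoff] = 0.0
    np.fill_diagonal(fc, 0.0)  # no self-interaction terms
    diff = d[..., None] - np.asarray(radial_rs)
    return fc[..., None] * np.exp(-eta * diff ** 2)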
def acsf_ang(xyzs, Zs, angular_cutoff, angular_rs, theta_s, zeta, eta):
"""
This does the angular part of the symmetry function as mentioned here: https://arxiv.org/pdf/1711.06385.pdf
It only works for systems where all the samples are the same molecule but in different configurations.
:param xyzs: tf tensor of shape (n_samples, n_atoms, 3) contaning the coordinates of each atom in each data sample
:param Zs: tf tensor of shape (n_samples, n_atoms) containing the atomic number of each atom in each data sample
:param angular_cutoff: scalar tensor
:param angular_rs: tf tensor of shape (n_ang_rs,) with the equivalent of the R_s values from the G2
:param theta_s: tf tensor of shape (n_thetas,)
:param zeta: tf tensor of shape (1,)
:param eta: tf tensor of shape (1,)
:return: tf tensor of shape (n_samples, n_atoms, n_atoms, n_atoms, n_ang_rs * n_thetas)
"""
# Finding the R_ij + R_ik term
with tf.name_scope("Sum_distances"):
dxyzs = tf.expand_dims(xyzs, axis=2) - tf.expand_dims(xyzs, axis=1)
dist_tensor = tf.cast(tf.norm(dxyzs, axis=3), dtype=tf.float32) # (n_samples, n_atoms, n_atoms)
# This is the tensor where element sum_dist_tensor[0,1,2,3] is the R_12 + R_13 in the 0th data sample
sum_dist_tensor = tf.expand_dims(dist_tensor, axis=3) + tf.expand_dims(dist_tensor,
axis=2) # (n_samples, n_atoms, n_atoms, n_atoms)
# Problem with the above tensor: we still have the R_ii + R_ik distances which are non zero and could be summed
# These need to be set to zero
n_atoms = Zs.get_shape().as_list()[1]
zarray = np.zeros((n_atoms, n_atoms, n_atoms))
for i in range(n_atoms):
for j in range(n_atoms):
for k in range(n_atoms):
if i == j or i == k or j == k:
zarray[i, j, k] = 1
# Make a bool tensor of the indices
where_eq_idx = tf.tile(tf.expand_dims(tf.convert_to_tensor(zarray, dtype=tf.bool), axis=0),
multiples=[tf.shape(sum_dist_tensor)[0], 1, 1, 1])
# For all the elements that are true in where_eq_idx, turn the elements of sum_dist_tensor to zero
zeros_1 = tf.zeros(tf.shape(sum_dist_tensor), dtype=tf.float32)
# Now finding the fc terms
with tf.name_scope("Fc_term"):
# 1. Find where Rij and Rik are < cutoff
where_less_cutoff = tf.less(dist_tensor, angular_cutoff)
# 2. Calculate the fc on the Rij and Rik tensors
fc_1 = 0.5 * (tf.cos(3.14159265359 * dist_tensor / angular_cutoff) + 1.0)
# 3. Apply the mask calculated in 1. to zero the values for where the distances are > than the cutoff
zeros_2 = tf.zeros(tf.shape(dist_tensor), dtype=tf.float32)
cut_off_fc = tf.where(where_less_cutoff, fc_1, zeros_2) # (n_samples, n_atoms, n_atoms)
# 4. Multiply the two tensors elementwise
fc_term = tf.multiply(tf.expand_dims(cut_off_fc, axis=3),
tf.expand_dims(cut_off_fc, axis=2)) # (n_samples, n_atoms, n_atoms, n_atoms)
# 5. Cleaning up the terms that should be zero because there are equal indices
clean_fc_term = tf.where(where_eq_idx, zeros_1, fc_term)
# 6. Cleaning up the terms due to the dummy atoms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask_2d = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
dummy_mask_3d = tf.logical_and(tf.expand_dims(dummy_mask_2d, axis=1), tf.expand_dims(tf.expand_dims(dummy_atoms, axis=-1), axis=-1))
cleaner_fc_term = tf.where(dummy_mask_3d, clean_fc_term, zeros_1)
# Now finding the theta_ijk term
with tf.name_scope("Theta"):
# Doing the dot products of all the possible vectors
dots_dxyzs = tf.cast(tf.reduce_sum(tf.multiply(tf.expand_dims(dxyzs, axis=3), tf.expand_dims(dxyzs, axis=2)),
axis=4), dtype=tf.float32) # (n_samples, n_atoms, n_atoms, n_atoms)
# Doing the products of the magnitudes
dist_prod = tf.multiply(tf.expand_dims(dist_tensor, axis=3),
tf.expand_dims(dist_tensor, axis=2)) # (n_samples, n_atoms, n_atoms, n_atoms)
# Dividing the dot products by the magnitudes to obtain cos theta
cos_theta = tf.divide(dots_dxyzs, dist_prod)
# Taking care of the values that due numerical error are just above 1.0 or below -1.0
cut_cos_theta = tf.clip_by_value(cos_theta, tf.constant(-1.0), tf.constant(1.0))
# Applying arc cos to find the theta value
theta = tf.acos(cut_cos_theta) # (n_samples, n_atoms, n_atoms, n_atoms)
# Removing the NaNs created by dividing by zero
clean_theta = tf.where(where_eq_idx, zeros_1, theta)
# cleaning up NaNs due by dummy atoms
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
dummy_mask_2d = tf.logical_and(tf.expand_dims(dummy_atoms, axis=1), tf.expand_dims(dummy_atoms, axis=-1))
dummy_mask_3d = tf.logical_and(tf.expand_dims(dummy_mask_2d, axis=1),
tf.expand_dims(tf.expand_dims(dummy_atoms, axis=-1), axis=-1))
cleaner_theta = tf.where(dummy_mask_3d, clean_theta, zeros_1)
# Finding the (0.5 * clean_sum_dist - R_s) term
with tf.name_scope("Exp_term"):
# Augmenting the dims of angular_rs
expanded_rs = tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.expand_dims(angular_rs, axis=0), axis=0), axis=0),
axis=0) # (1, 1, 1, 1, n_rs)
# Augmenting the dim of clean_sum_dist *0.5
# expanded_sum = tf.expand_dims(clean_sum_dist * 0.5, axis=-1)
expanded_sum = tf.expand_dims(sum_dist_tensor * 0.5, axis=-1)
# Combining them
brac_term = tf.subtract(expanded_sum, expanded_rs)
# Finally making the exponential term
exponent = - eta * tf.square(brac_term)
exp_term = tf.exp(exponent) # (n_samples, n_atoms, n_atoms, n_atoms, n_rs)
# Finding the cos(theta - theta_s) term
with tf.name_scope("Cos_term"):
# Augmenting the dimensions of theta_s
expanded_theta_s = tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.expand_dims(theta_s, axis=0), axis=0), axis=0),
axis=0)
# Augmenting the dimensions of theta
expanded_theta = tf.expand_dims(cleaner_theta, axis=-1)
# Subtracting them and do the cos
cos_theta_term = tf.cos(
tf.subtract(expanded_theta, expanded_theta_s)) # (n_samples, n_atoms, n_atoms, n_atoms, n_theta_s)
# Make the whole cos term of the sum
cos_term = tf.pow(tf.add(tf.ones(tf.shape(cos_theta_term), dtype=tf.float32), cos_theta_term),
zeta) # (n_samples, n_atoms, n_atoms, n_atoms, n_theta_s)
# Final product of terms inside the sum time by 2^(1-zeta)
expanded_fc = tf.expand_dims(tf.expand_dims(cleaner_fc_term, axis=-1), axis=-1, name="Expanded_fc")
expanded_cos = tf.expand_dims(cos_term, axis=-2, name="Expanded_cos")
expanded_exp = tf.expand_dims(exp_term, axis=-1, name="Expanded_exp")
const = tf.pow(tf.constant(2.0, dtype=tf.float32), (1.0 - zeta))
with tf.name_scope("Ang_term"):
prod_of_terms = const * tf.multiply(tf.multiply(expanded_cos, expanded_exp),
expanded_fc) # (n_samples, n_atoms, n_atoms, n_atoms, n_rs, n_theta_s)
# Reshaping to shape (n_samples, n_atoms, n_atoms, n_atoms, n_rs*n_theta_s)
presum_term = tf.reshape(prod_of_terms,
[tf.shape(prod_of_terms)[0], n_atoms, n_atoms, n_atoms,
theta_s.shape[0] * angular_rs.shape[0]])
return presum_term
def sum_rad(pre_sum, Zs, elements_list, radial_rs):
"""
Sum of the terms in the radial part of the symmetry function. The terms corresponding to the same neighbour identity
are summed together.
:param pre_sum: tf tensor of shape (n_samples, n_atoms, n_atoms, n_rs)
:param Zs: tf tensor of shape (n_samples, n_atoms)
:param elements_list: np.array of shape (n_elements,)
:param radial_rs: tf tensor of shape (n_rad_rs,)
:return: tf tensor of shape (n_samples, n_atoms, n_rad_rd * n_elements)
"""
n_atoms = Zs.get_shape().as_list()[1]
n_elements = len(elements_list)
n_rs = radial_rs.get_shape().as_list()[0]
## Making a matrix of all the possible neighbouring atoms
# No need to clean up diagonal elements because they are already set to zero in the presum term
neighb_atoms = tf.tile(tf.expand_dims(tf.expand_dims(Zs, axis=1), axis=-1),
multiples=[1, n_atoms, 1, n_rs]) # (n_samples, n_atoms, n_atoms, n_rs)
zeros = tf.zeros(tf.shape(pre_sum), dtype=tf.float32)
# Looping over all the possible elements in the system and extracting the relevant terms from the pre_sum term
pre_sum_terms = []
for i in range(n_elements):
element = tf.constant(elements_list[i], dtype=tf.int32)
equal_elements = tf.equal(neighb_atoms, element)
slice_presum = tf.where(equal_elements, pre_sum, zeros)
slice_sum = tf.reduce_sum(slice_presum, axis=[2])
pre_sum_terms.append(slice_sum)
# Concatenating the extracted terms.
final_term = tf.concat(pre_sum_terms, axis=-1, name="sum_rad")
# Cleaning up the dummy atoms descriptors
dummy_atoms = tf.logical_not(tf.equal(Zs, tf.constant(0, dtype=tf.int32))) # False where there are dummy atoms
mask = tf.tile(tf.expand_dims(dummy_atoms, axis=-1), multiples=[1, 1, n_elements*n_rs])
# clean_final_term = tf.where(mask, final_term, tf.zeros(final_term.shape, dtype=tf.float32))
clean_final_term = tf.where(mask, final_term, tf.zeros(tf.shape(final_term), dtype=tf.float32))
return clean_final_term
def sum_ang(pre_sumterm, Zs, element_pairs_list, angular_rs, theta_s):
"""
    This function does the sum of the terms in the angular part of the symmetry function. Three body interactions where
the two neighbours are the same elements are summed together.
:param pre_sumterm: tf tensor of shape (n_samples, n_atoms, n_ang_rs * n_thetas)
:param Zs: tf tensor of shape (n_samples, n_atoms)
:param element_pairs_list: np array of shape (n_elementpairs, 2)
:param angular_rs: tf tensor of shape (n_ang_rs,)
:param theta_s: tf tensor of shape (n_thetas,)
:return: tf tensor of shape (n_samples, n_atoms, n_ang_rs * n_thetas * n_elementpairs)
"""
n_atoms = Zs.get_shape().as_list()[1]
n_pairs = len(element_pairs_list)
n_rs = angular_rs.get_shape().as_list()[0]
n_thetas = theta_s.get_shape().as_list()[0]
# Making the pair matrix
Zs_exp_1 = tf.expand_dims(tf.tile(tf.expand_dims(Zs, axis=1), multiples=[1, n_atoms, 1]), axis=-1)
Zs_exp_2 = tf.expand_dims(tf.tile(tf.expand_dims(Zs, axis=-1), multiples=[1, 1, n_atoms]), axis=-1)
neighb_pairs = tf.concat([Zs_exp_1, Zs_exp_2], axis=-1) # (n_samples, n_atoms, n_atoms, 2)
# Cleaning up diagonal elements
    zarray = np.zeros((n_atoms, n_atoms, 2))
# Calculate Pearson correlation among columns
import os
import pathlib
import pickle
import numpy as np
import pandas as pd
from scipy import stats
from src import MetaData, QueryDatabase
def get_table_values(override=False, sample_size=10000):
"""
Returns an array of column values of given sample size
@type override: object
@return:
"""
column_val_path = f'{os.environ["WORKING_DIRECTORY"]}/results/sampled_columns.obj'
table_names_path = f'{os.environ["WORKING_DIRECTORY"]}/results/table_names.obj'
if not override and os.path.isfile(column_val_path) and os.path.isfile(table_names_path):
with open(column_val_path, 'rb') as file:
columns = pickle.load(file)
with open(table_names_path, 'rb') as file:
table_names = pickle.load(file)
return columns, table_names
tables = MetaData.get_tables(f'{os.environ["WORKING_DIRECTORY"]}/data/datasets.txt')
columns = []
table_names = []
count = 0
for table in tables:
table_name = table.replace(":", ".")
table_path = f'{os.environ["WORKING_DIRECTORY"]}/data/tables/{table_name}.npy'
if not os.path.isfile(table_path):
print(f'table {table_name} does not have numeric columns.')
continue
with open(table_path, 'rb') as file:
table_data = np.load(file, allow_pickle=True)
if table_data.shape[0] > sample_size:
table_data = table_data[np.random.default_rng().choice(table_data.shape[0], sample_size, replace=False)]
for col in np.transpose(table_data):
columns.append(col.astype('float64'))
table_names.append(table_name)
count += 1
print(f'Loaded {count} tables.')
with open(column_val_path, 'wb') as file:
pickle.dump(columns, file)
with open(table_names_path, 'wb') as file:
pickle.dump(table_names, file)
return columns, table_names
def calculate_pearson_correlation(override=False, sample_size=10000, num_permutations=10):
"""
Calculate Pearson correlation for each column combination
@param override:
@param sample_size:
@param num_permutations:
"""
columns, table_names = get_table_values(override=override, sample_size=sample_size)
corr_matrix = np.zeros((len(columns), len(columns)))
count = 0
for i in range(len(columns)):
for j in range(i + 1, len(columns)):
len_i = len(columns[i])
len_j = len(columns[j])
col_i = columns[i]
col_j = columns[j]
table_name_i = table_names[i]
table_name_j = table_names[j]
if table_name_i != table_name_j:
                correlation = 0
                sub_size = min(len_i, len_j)
                for _ in range(num_permutations):
                    # Draw fresh subsamples from the full columns each time;
                    # resampling from the previous subsample would only permute it.
                    sub_i = col_i[np.random.default_rng().choice(len_i, sub_size, replace=False)]
                    sub_j = col_j[np.random.default_rng().choice(len_j, sub_size, replace=False)]
                    correlation = max(correlation, stats.pearsonr(sub_i, sub_j)[0])
else:
correlation = stats.pearsonr(col_i, col_j)[0]
corr_matrix[i][j] = correlation
count += 1
if count % 10000 == 0:
print(f'Completed {count} correlation calculations.')
return corr_matrix
def save_corr_matrix(override=False, sample_size=10000, num_permutations=10):
"""
Saves correlation matrix locally
@param override:
@param sample_size:
@param num_permutations:
@return:
"""
corr_matrix = calculate_pearson_correlation(override=override, sample_size=sample_size,
num_permutations=num_permutations)
np.savez(f'{os.environ["WORKING_DIRECTORY"]}/results/corr_matrix.npz', corr_matrix=corr_matrix)
return corr_matrix
def get_corr_matrix(override=False, sample_size=10000, num_permutations=10):
"""
Get correlation matrix from saved file if present else calculate
@param override:
@param sample_size:
@param num_permutations:
@return:
"""
file_path = f'{os.environ["WORKING_DIRECTORY"]}/results/corr_matrix.npz'
if override or not os.path.isfile(file_path):
corr_matrix = save_corr_matrix(override=override, sample_size=sample_size, num_permutations=num_permutations)
else:
        corr_matrix = np.load(file_path)['corr_matrix']
    return corr_matrix
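if __name__ == '__main__':
    # Hedged usage sketch: rank the most correlated column pairs from the saved
    # matrix. Assumes WORKING_DIRECTORY is set in the environment beforehand.
    corr = get_corr_matrix()
    iu = np.triu_indices_from(corr, k=1)  # only the upper triangle is filled
    top = np.argsort(-np.abs(corr[iu]))[:10]
    for k in top:
        i, j = iu[0][k], iu[1][k]
        print(f'columns ({i}, {j}): r = {corr[i, j]:.3f}')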
# -*- coding: utf-8 -*-
"""Contains the plotting-specific functions specific to the velocity width analysis."""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from fake_spectra import plot_spectra as ps
from fake_spectra import haloassigned_spectra as hs
import kstest as ks
import vw_spectra as vw
try:
xrange(1)
except NameError:
xrange = range
def _bootstrap_sample(vel_data, v_table, samples, error):
"""Generate a Monte Carlo error sample of the differential distribution."""
# Generate some Monte Carlo samples where each element is perturbed by
# a Gaussian, sigma given by error.
    index = np.random.randint(0, np.size(vel_data), samples)
bootstrap = vel_data[index]
if error > 0.:
bootstrap += np.random.normal(0,error,size=samples)
nn = np.histogram(bootstrap,v_table)[0]
return nn
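def _demo_bootstrap_band(samples=100, n_boot=1000):
    """Sanity sketch (mine, not used by the class below): build a 68% bootstrap
    band from synthetic data, mirroring what _plot_errors does with real spectra."""
    data = np.random.lognormal(mean=4.0, sigma=0.5, size=500)
    v_table = np.logspace(1, 3, 11)
    counts = np.array([_bootstrap_sample(data, v_table, samples, 5.)
                       for _ in range(n_boot)])
    return np.percentile(counts, 16, axis=0), np.percentile(counts, 84, axis=0)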
class VWPlotSpectra(hs.HaloAssignedSpectra, ps.PlottingSpectra, vw.VWSpectra):
"""Extends PlottingSpectra with velocity width specific code."""
def plot_vel_width(self, elem, ion, dv=0.17, color="red", ls="-"):
"""Plot the velocity widths of this snapshot
Parameters:
elem - element to use
ion - ionisation state: 1 is neutral.
dv - bin spacing
"""
(vbin, vels) = self.vel_width_hist(elem, ion, dv)
plt.semilogx(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
def plot_cum_vel_width(self, elem, ion, norm, dv=0.1, color="red", ls="-"):
"""Plot the velocity widths of this snapshot
Parameters:
elem - element to use
ion - ionisation state: 1 is neutral.
dv - bin spacing
"""
(vbin, vels) = self.vel_width_hist(elem, ion, dv)
cvels = np.cumsum(vels)
cvels = cvels*norm/cvels[-1]
plt.semilogx(vbin, cvels, color=color, lw=3, ls=ls,label=self.label)
def plot_cum_f_peak(self, elem, ion, norm, dv=0.01, color="red", ls="-"):
"""Plot the velocity widths of this snapshot
Parameters:
elem - element to use
ion - ionisation state: 1 is neutral.
dv - bin spacing
"""
(vbin, vels) = self.f_peak_hist(elem, ion, dv)
cvels = np.cumsum(vels)
cvels = cvels*norm/cvels[-1]
plt.plot(vbin, cvels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{edg}$")
def plot_f_meanmedian_errors(self, elem, ion, samples, cumulative=False, nv_table = 11, color="red"):
"""Plot 68% contour for error on the fmm distribution"""
f_peak = self.vel_mean_median(elem, ion)
ind = self.get_filt(elem, ion)
f_peak = f_peak[ind]
v_table=np.linspace(0,1,nv_table)
self._plot_errors(f_peak, v_table, samples, 0., cumulative, False, color)
def plot_f_peak_errors(self, elem, ion, samples, cumulative=False, nv_table=11, color="red"):
"""Plot 68% contour for error on the fpeak distribution"""
f_peak = self.vel_peak(elem, ion)
ind = self.get_filt(elem, ion)
f_peak = f_peak[ind]
v_table=np.linspace(0,1,nv_table)
self._plot_errors(f_peak, v_table, samples, 0., cumulative, False, color)
def plot_eq_width_errors(self, elem, ion, line, samples, cumulative=False, min_width = -1.6, nv_table=11, color="red"):
"""Plot 68% contour for error on the fpeak distribution"""
eq_width = self.equivalent_width(elem, ion, line)
ind = self.get_filt(elem, ion)
eq_width = eq_width[ind]
v_table = np.logspace(min_width, np.log10(np.max(eq_width)), nv_table)
self._plot_errors(np.log10(eq_width), np.log10(v_table), samples, 0.05, cumulative, False, color)
def plot_vw_errors(self, elem, ion, samples, cumulative=False, nv_table=11, color="red"):
"""Plot 68% contour for error on the velocity width distribution"""
vel_width = self.vel_width(elem, ion)
ind = self.get_filt(elem, ion)
vel_width = vel_width[ind]
v_table=np.logspace(1,np.log10(np.max(vel_width)+10),nv_table)
self._plot_errors(vel_width, v_table, samples, 5, cumulative, True, color)
def _plot_errors(self, vel_data, v_table, samples, error, cumulative=False, lognorm=True, color="red"):
"""Find and plot a 68% contour for a subsample of size samples, by Monte Carlo."""
vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
#Get a subsample
cdfs = np.array([_bootstrap_sample(vel_data, v_table, samples, error) for _ in xrange(10000)])
if cumulative:
cdfs = np.cumsum(cdfs, axis=1)
norm = 1
else:
if lognorm:
v_table = np.log10(v_table)
norm = samples * np.array([(-v_table[i]+v_table[i+1]) for i in xrange(np.size(v_table)-1)])
lower = np.percentile(cdfs, 16, axis=0)/norm
upper = np.percentile(cdfs, 84, axis=0)/norm
plt.fill_between(vbin, lower, upper, color=color, alpha=0.3)
def plot_f_meanmedian(self, elem, ion, dv=0.06, color="red", ls="-"):
"""
Plot an f_mean_median histogram
For args see plot_vel_width
"""
(vbin, vels) = self.f_meanmedian_hist(elem, ion, dv)
plt.plot(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{mm}$")
def plot_f_peak(self, elem, ion, dv=0.06, color="red", ls="-"):
"""
Plot an f_peak histogram
For args see plot_vel_width
"""
(vbin, vels) = self.f_peak_hist(elem, ion, dv)
plt.plot(vbin, vels, color=color, lw=3, ls=ls,label=self.label)
plt.xlabel(r"$f_\mathrm{edg}$")
def plot_sep_frac(self,elem = "Si", ion = 2, thresh = 1e-1, mindist = 15, dv = 0.2, color="blue", ls="-"):
"""
Plots the fraction of spectra in each velocity width bin which are separated.
Threshold is as a percentage of the maximum value.
mindist is in km/s
"""
sep = self.get_separated(elem, ion, thresh,mindist)
vels = self.vel_width(elem, ion)
ind = self.get_filt(elem, ion)
v_table = 10**np.arange(1, 3, dv)
vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
hist1 = np.histogram(vels[ind], v_table)
hist2 = np.histogram(vels[ind][sep],v_table)
hist1[0][np.where(hist1[0] == 0)] = 1
plt.semilogx(vbin, hist2[0]/(1.*hist1[0]), color=color, ls=ls, label=self.label)
def plot_vel_width_breakdown(self, elem = "Si", ion = 2, dv = 0.1):
"""
Plots the fraction of the total velocity width histogram in a series of virial velocity bins
"""
#Find velocity width
vels = self.vel_width(elem, ion)
ii = self.get_filt(elem, ion)
self._plot_breakdown(vels,ii,(0, 60, 120), (60, 120, 900), ("< 60", "60-120", "> 120"),dv)
plt.xlabel(r"$v_\mathrm{90}$ (km s$^{-1}$)")
plt.ylim(0,1)
def plot_f_peak_breakdown(self, elem = "Si", ion = 2, dv = 0.05):
"""
Plots the fraction of the total fedge histogram in a series of virial velocity bins
"""
#Find velocity width
vels = self.vel_peak(elem, ion)
ii = self.get_filt(elem, ion)
self._plot_breakdown(vels,ii,(0, 50), (50, 900), ("< 50", "> 50"),dv, False)
plt.xlabel(r"$f_\mathrm{edg}$")
plt.ylim(0,1)
plt.xlim(0,1)
plt.legend(loc=1,ncol=2)
def plot_mult_halo_frac(self,elem = "Si", ion = 2, dv = 0.2, color="blue", ls="-"):
"""
Plots the fraction of spectra in each velocity width bin which are separated.
Threshold is as a percentage of the maximum value.
mindist is in km/s
"""
#Find velocity width
(halos, subhalos) = self.find_nearby_halos()
vels = self.vel_width(elem, ion)
ii = self.get_filt(elem, ion)
#Find virial velocity
(halo, _) = self.find_nearest_halo()
ind = np.where(halo[ii] > 0)
# virial = np.ones_like(halo, dtype=np.double)
# virial[ind] = self.virial_vel(halo[ind])
vwvir = vels[ii][ind] #/virial[ind]
#Make bins
v_table = 10**np.arange(np.min(np.log10(vwvir)),np.max(np.log10(vwvir)) , dv)
vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])
#Histogram of vel width / virial vel
hist1 = np.histogram(vwvir, v_table)
hist1[0][np.where(hist1[0] == 0)] = 1
#Find places with multiple halos
subhalo_parent = [list(self.sub_sub_index[ss]) for ss in subhalos]
allh = np.array([list(set(subhalo_parent[ii] + halos[ii])) for ii in xrange(self.NumLos)])
indmult = np.where([len(aa) > 1 for aa in allh[ind]])
histmult = np.histogram(vwvir[indmult],v_table)
plt.semilogx(vbin, histmult[0]/(1.*hist1[0]), color=color, ls=ls, label=self.label)
def plot_Z_vs_vel_width(self,elem="Si", ion=2, color="blue",color2="darkblue"):
"""Plot the correlation between metallicity and velocity width"""
vel = self.vel_width(elem, ion)
met = self.get_metallicity()
#Ignore objects too faint to be seen
ind2 = np.where(met > 1e-4)
met = met[ind2]
vel = vel[ind2]
self._plot_2d_contour(vel, met, 10, "Z vel sim", color, color2, fit=True)
plt.plot(vel, met, 'o', color=color)
plt.xlim(10,2e3)
plt.ylabel(r"$\mathrm{Z} / \mathrm{Z}_\odot$")
plt.xlabel(r"$v_\mathrm{90}$ (km s$^{-1}$)")
def plot_vel_vs_mass(self,elem, ion, color="blue",color2="darkblue"):
"""Plot the correlation between mass and metallicity, with a fit"""
vel = self.vel_width(elem, ion)
self._plot_xx_vs_mass(vel, "vel",color,color2)
def kstest(self, Zdata, veldata, elem="Si", ion=2):
"""Find the 2D KS test value of the vel width and log metallicity
with respect to an external dataset, veldata and Z data"""
met = self.get_metallicity()
ind = self.get_filt(elem, ion)
met = np.log10(met[ind])
vel = np.log10(self.vel_width(elem, ion)[ind])
data2 = np.array([met,vel]).T
data = np.array([np.log10(Zdata), np.log10(veldata)]).T
return ks.ks_2d_2samp(data,data2)
def plot_virial_vel_vs_vel_width(self,elem, ion,color="red", ls="-", label="", dm=0.1):
"""Plot a histogram of the velocity widths vs the halo virial velocity"""
(halos, _) = self.find_nearest_halo()
ind = self.get_filt(elem,ion)
f_ind = np.where(halos[ind] != -1)
vel = self.vel_width(elem, ion)[ind][f_ind]
virial = self.virial_vel(halos[ind][f_ind])+0.1
vvvir = vel/virial
m_table = 10**np.arange(np.log10(np.min(vvvir)), np.log10(np.max(vvvir)), dm)
        mbin = np.array([(m_table[i]+m_table[i+1])/2. for i in range(0, np.size(m_table)-1)])
#Some tools for sound processing and visualization.
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.io.wavfile
from scipy import fftpack
from skimage import util
from magenta.models.nsynth import utils
from magenta.models.nsynth.wavenet import fastgen
# Requires the magenta package and a local NSynth WaveNet checkpoint (model.ckpt-200000).
audio_file_path = 'Sample_Audio.wav'
#scipy function wavfile.read, just for sample rate in case of unknown.
def getSampleRate(filename):
fid = open(filename, 'rb')
try:
file_size, is_big_endian = scipy.io.wavfile._read_riff_chunk(fid) # find out how to read the file
channels = 1 # assume 1 channel and 8 bit depth if there is no format chunk
bit_depth = 8
while fid.tell() < file_size: #read the file a couple of bytes at a time
# read the next chunk
chunk_id = fid.read(4)
if chunk_id == b'fmt ': # retrieve formatting information
fmt_chunk = scipy.io.wavfile._read_fmt_chunk(fid, is_big_endian)
format_tag, channels, fs = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
if bit_depth not in (8, 16, 32, 64, 96, 128):
raise ValueError("Unsupported bit depth: the wav file "
"has {}-bit data.".format(bit_depth))
finally:
if not hasattr(filename, 'read'):
fid.close()
else:
fid.seek(0)
    return fs
#Magenta model to synthezise new sound. Uses librosa as one of the core modules.
def Plot_SingleFile(file_name, sampleRate):
audio = utils.load_audio(file_name, sample_length=70000) #sample_length for how long will it be.
sample_length = audio.shape[0]
print('{} samples, {} seconds'.format(sample_length, sample_length / float(sampleRate)))
#Encoding for new sound part.
encoding = fastgen.encode(audio, 'model.ckpt-200000', sample_length)
print(encoding.shape)
np.save(file_name + '.npy', encoding)
fig, axs = plt.subplots(2, 1, figsize = (10,5))
axs[0].plot(audio)
axs[0].set_title('Audio Signal')
axs[1].plot(encoding[0]);
axs[1].set_title('NSynth Encoding')
#synthesis
fastgen.synthesize(encoding, save_paths=['gen_' + file_name], samples_per_save=sample_length)
#To combine sounds (Magenta takes in representation tumbre, tonality and change over time)
def load_encoding(fname, sample_length=None, sr=16000, ckpt='model.ckpt-200000'):
    audio = utils.load_audio(fname, sample_length=sample_length, sr=sr)
    encoding = fastgen.encode(audio, ckpt, sample_length)
    return audio, encoding
def Combine_Plot(file1, file2):
sample_length = 20000 #Duration
aud1, enc1 = load_encoding(file1, sample_length)
    aud2, enc2 = load_encoding(file2, sample_length)
enc_mix = (enc1 + enc2)/ 2.0
    fig, axs = plt.subplots(3, 1, figsize=(10, 7))
axs[0].plot(enc1[0]);
axs[0].set_title('Encoding 1')
axs[1].plot(enc2[0]);
axs[1].set_title('Encoding 2')
axs[2].plot(enc_mix[0]);
axs[2].set_title('Average')
def fade(encoding, mode='in'):
length = encoding.shape[1]
    fadein = (0.5 * (1.0 - np.cos(np.pi * np.arange(length) /
                                  float(length)))).reshape(1, -1, 1)
if mode == 'in':
return fadein * encoding
else:
return (1.0 - fadein) * encoding
def crossfade(encoding1, encoding2):
return fade(encoding1, 'out') + fade(encoding2, 'in')
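def _check_crossfade():
    # Sanity sketch: the raised-cosine fades are complementary, so crossfading a
    # signal with itself must return the signal (up to float error). The shape
    # follows the (batch, time, channels) encoding layout fade() assumes.
    enc = np.random.randn(1, 1000, 16)
    assert np.allclose(crossfade(enc, enc), enc)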
def Combine_Synth(file1, file2):
sample_length = 20000 #Duration
aud1, enc1 = load_encoding(file1, sample_length)
aud2, enc2 = load_encoding(file2, sample_length)
fastgen.synthesize(crossfade(enc1, enc2), save_paths = ['crossfade.wav'])
#Visualization!
def fft_index(n):
return np.append(np.arange(n//2,n), np.arange(0, n//2))
def fft_unpack(x):
return [x[i] for i in fft_index(len(x))]
def fft(x):
X = fftpack.fft(x)
return fft_unpack(X)
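def _check_fft_equivalence(n=8):
    # Self-check sketch: fft_index reproduces np.fft.fftshift's reordering for
    # even n, so fft(x) should match np.fft.fftshift(fftpack.fft(x)).
    x = np.random.randn(n)
    assert np.allclose(fft(x), np.fft.fftshift(fftpack.fft(x)))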
def SinglePlot(sampleRate, dataR, freqDataR):
plt.subplot(411)
timeAxis = np.arange(0,len(dataR)/sampleRate,1/sampleRate)
plt.plot(timeAxis[0:1000], dataR[0:1000])
plt.subplot(412)
freqAxis = sampleRate*np.arange(-1/2,1/2,1/len(freqDataR))
plt.plot(freqAxis, freqDataR)
plt.show()
def waveFormPlot(file):
rate, audio = scipy.io.wavfile.read(file)
#audio = np.mean(audio, axis = 1) #converting file to mono by
#average of left and right side.
N = audio.shape[0]
L = N/rate
f, ax = plt.subplots()
ax.plot(np.arange(N)/rate, audio)
ax.set_xlabel('Time: Seconds')
ax.set_ylabel('Amplitude')
    print('Audio length: {:.2f} seconds'.format(L))
def spectogramPlot(file):
M = 1024 #sample number, around 0.2 seconds
rate, data = scipy.io.wavfile.read(file)
N = data.shape[0]
L = N/rate
slices = util.view_as_windows(data, window_shape = (M,), step = 1)
print('Audio shape: {}, Sliced audio shape: {}'.format(data.shape, slices.shape))
win = np.hanning(M + 1)[:-1]
slices = slices*win
slices = slices.T
print('Shape of slices:', slices.shape)
spectrum = np.fft.fft(slices, axis = 0)[:M//2 + 1:-1]
spectrum = np.abs(spectrum)
f, ax = plt.subplots(figsize = (4.8, 2.4))
    S = np.abs(spectrum)
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.utils import macos_version
from coremltools.models.neural_network import flexible_shape_utils
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
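    @staticmethod
    def _compare_predictions_vectorized(np_preds, coreml_preds, delta=.01):
        # Sketch of an equivalent vectorized form of the loop above (same
        # max-denominator relative tolerance); handy for large outputs.
        a = np_preds.flatten()
        b = coreml_preds.flatten()
        den = np.maximum(1.0, np.maximum(a, b))
        return bool(np.all(np.abs(a / den - b / den) <= delta))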
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
This utility function is used for validate random distributions layers.
It validates the first 10 moments of prediction and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
        W = np.reshape(np.arange(5, 9), (1, 2, 2))
"""
NASBench-201 search space, rollout, embedder
"""
import os
import re
import copy
import random
import pickle
import itertools
import collections
from typing import List, Optional, NamedTuple
from collections import defaultdict, OrderedDict
import contextlib
import six
import yaml
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from nas_201_api import NASBench201API as API
from aw_nas import utils, ops
from aw_nas.common import SearchSpace
from aw_nas.rollout.base import BaseRollout
from aw_nas.evaluator.base import BaseEvaluator
from aw_nas.controller.base import BaseController
from aw_nas.controller import DiffController
from aw_nas.evaluator.arch_network import ArchEmbedder
from aw_nas.utils import (
DenseGraphSimpleOpEdgeFlow,
DenseGraphConvolution,
data_parallel,
use_params,
softmax,
)
from aw_nas.utils.parallel_utils import _check_support_candidate_member_mask
from aw_nas.weights_manager.base import BaseWeightsManager, CandidateNet
from aw_nas.final.base import FinalModel
VERTICES = 4
class NasBench201SearchSpace(SearchSpace):
NAME = "nasbench-201"
def __init__(
self,
num_layers=17,
vertices=4,
load_nasbench=True,
ops_choices=(
"none",
"skip_connect",
"nor_conv_1x1",
"nor_conv_3x3",
"avg_pool_3x3",
),
):
super(NasBench201SearchSpace, self).__init__()
self.ops_choices = ops_choices
self.ops_choice_to_idx = {
choice: i for i, choice in enumerate(self.ops_choices)
}
self.load_nasbench = load_nasbench
self.num_vertices = vertices
self.num_layers = num_layers
self.none_op_ind = self.ops_choices.index("none")
self.num_possible_edges = self.num_vertices * (self.num_vertices - 1) // 2
self.num_op_choices = len(self.ops_choices) # 5
self.num_ops = self.num_vertices * (self.num_vertices - 1) // 2
self.idx = np.tril_indices(self.num_vertices, k=-1)
self.genotype_type = str
if self.load_nasbench:
self._init_nasbench()
def canonicalize(self, rollout):
# TODO
arch = rollout.arch
num_vertices = rollout.search_space.num_vertices
op_choices = rollout.search_space.ops_choices
S = []
S.append("0")
res = ""
for i in range(1, num_vertices):
preS = []
s = ""
for j in range(i):
if ((int(arch[i][j]) == 0) or (S[j] == "#")):
s = "#"
elif (int(arch[i][j]) == 1):
s = S[j]
else:
s = "(" + S[j] + ")" + "@" + op_choices[int(arch[i][j])]
preS.append(s)
preS.sort()
s = ""
for j in range(i):
s = s + preS[j]
S.append(s)
res = s
return res
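    # Hedged usage sketch: canonicalize() returns an isomorphism-invariant
    # string, so it can deduplicate randomly sampled architectures, e.g.:
    #     ss = NasBench201SearchSpace(load_nasbench=False)
    #     seen = {ss.canonicalize(ss.random_sample()) for _ in range(100)}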
def __getstate__(self):
state = super(NasBench201SearchSpace, self).__getstate__().copy()
if "api" in state:
del state["api"]
return state
def __setstate__(self, state):
super(NasBench201SearchSpace, self).__setstate__(state)
if self.load_nasbench:
self._init_nasbench()
# optional API
def genotype_from_str(self, genotype_str):
return genotype_str
# ---- APIs ----
def random_sample(self):
return NasBench201Rollout(self.random_sample_arch(), search_space=self)
def genotype(self, arch):
# return the corresponding ModelSpec
# edges, ops = arch
return self.matrix2str(arch)
def rollout_from_genotype(self, genotype):
return NasBench201Rollout(API.str2matrix(genotype), search_space=self)
def plot_arch(self, genotypes, filename, label, plot_format="pdf", **kwargs):
matrix = self.str2matrix(genotypes)
from graphviz import Digraph
graph = Digraph(
format=plot_format,
# https://stackoverflow.com/questions/4714262/graphviz-dot-captions
body=['label="{l}"'.format(l=label), "labelloc=top", "labeljust=left"],
edge_attr=dict(fontsize="20", fontname="times"),
node_attr=dict(
style="filled",
shape="rect",
align="center",
fontsize="20",
height="0.5",
width="0.5",
penwidth="2",
fontname="times",
),
engine="dot",
)
graph.body.extend(["rankdir=LR"])
graph.node(str(0), fillcolor="darkseagreen2")
graph.node(str(self.num_vertices - 1), fillcolor="palegoldenrod")
[
graph.node(str(i), fillcolor="lightblue")
for i in range(1, self.num_vertices - 1)
]
for to_, from_ in zip(*self.idx):
op_name = self.ops_choices[int(matrix[to_, from_])]
if op_name == "none":
continue
graph.edge(str(from_), str(to_), label=op_name, fillcolor="gray")
graph.render(filename, view=False)
fnames = []
fnames.append(("cell", filename + ".{}".format(plot_format)))
return fnames
def distance(self, arch1, arch2):
pass
@classmethod
def supported_rollout_types(cls):
return ["nasbench-201", "nasbench-201-differentiable"]
def mutate(self, rollout): # pylint: disable=arguments-differ
rand_ind = np.random.randint(0, self.idx[0].shape[0])
neighbor_choice = np.random.randint(0, self.num_op_choices)
arch_mat = rollout.arch
while neighbor_choice == arch_mat[self.idx[0][rand_ind], self.idx[1][rand_ind]]:
neighbor_choice = np.random.randint(0, self.num_op_choices)
new_arch_mat = copy.deepcopy(arch_mat)
new_arch_mat[self.idx[0][rand_ind], self.idx[1][rand_ind]] = neighbor_choice
return NasBench201Rollout(new_arch_mat, self)
# ---- helpers ----
def matrix2str(self, arch):
node_strs = []
for i_node in range(1, self.num_vertices):
node_strs.append(
"|"
+ "|".join(
[
"{}~{}".format(
self.ops_choices[int(arch[i_node, i_input])], i_input
)
for i_input in range(0, i_node)
]
)
+ "|"
)
return "+".join(node_strs)
def str2matrix(self, str_):
arch = np.zeros((self.num_vertices, self.num_vertices))
split_str = str_.split("+")
for ind, s in enumerate(split_str):
geno = [name for name in s.split("|") if name != ""]
for g in geno:
name, conn = g.split("~")
to_ = ind + 1
from_ = int(conn)
arch[to_][from_] = self.ops_choices.index(name)
return arch
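    def _roundtrip_ok(self, arch=None):
        # Self-check sketch (not in the original API): matrix2str and
        # str2matrix should be exact inverses on sampled architectures.
        arch = self.random_sample_arch() if arch is None else arch
        return np.array_equal(self.str2matrix(self.matrix2str(arch)), arch)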
def _init_nasbench(self):
# the arch -> performances dataset
self.base_dir = os.path.join(
utils.get_awnas_dir("AWNAS_DATA", "data"), "nasbench-201"
)
self.api = API(os.path.join(self.base_dir, "NAS-Bench-201-v1_0-e61699.pth"))
def op_to_idx(self, ops):
return [self.ops_choice_to_idx[op] for op in ops]
def random_sample_arch(self):
arch = np.zeros((self.num_vertices, self.num_vertices))
arch[np.tril_indices(self.num_vertices, k=-1)] = np.random.randint(
low=0, high=self.num_op_choices, size=self.num_ops
)
return arch
def batch_rollouts(self, batch_size, shuffle=True, max_num=None):
len_ = ori_len_ = len(self.api)
if max_num is not None:
len_ = min(max_num, len_)
indexes = np.arange(ori_len_)
np.random.shuffle(indexes)
ind = 0
while ind < len_:
end_ind = min(len_, ind + batch_size)
yield [
NasBench201Rollout(
matrix=self.api.str2matrix(self.api.arch(r_ind)), search_space=self
)
for r_ind in indexes[ind:end_ind]
]
ind = end_ind
class NasBench201Rollout(BaseRollout):
NAME = "nasbench-201"
supported_components = [("controller", "rl"), ("evaluator", "mepa")]
def __init__(self, matrix, search_space):
super(NasBench201Rollout, self).__init__()
self.arch = matrix
self.search_space = search_space
self.perf = collections.OrderedDict()
self._genotype = None
def set_candidate_net(self, c_net):
raise Exception("Should not be called")
def plot_arch(self, filename, label="", edge_labels=None):
return self.search_space.plot_arch(
self.genotype, filename, label=label, edge_labels=edge_labels
)
@property
def genotype(self):
if self._genotype is None:
self._genotype = self.search_space.genotype(self.arch)
return self._genotype
def __repr__(self):
return "NasBench201Rollout(matrix={arch}, perf={perf})".format(
arch=self.arch, perf=self.perf
)
try: # Python >= 3.6
class DiffArch(NamedTuple):
op_weights: torch.Tensor
edge_norms: Optional[torch.Tensor] = None
except (SyntaxError, TypeError):
DiffArch = NamedTuple(
"DiffArch",
[("op_weights", torch.Tensor), ("edge_norms", Optional[torch.Tensor])],
)
class NasBench201DiffRollout(BaseRollout):
NAME = "nasbench-201-differentiable"
supported_components = [
("controller", "nasbench-201-gcn-differentiable"),
("evaluator", "mepa"),
("trainer", "simple"),
]
def __init__(
self, arch: List[DiffArch], sampled, logits, search_space, candidate_net=None
):
super(NasBench201DiffRollout, self).__init__()
self.arch = arch
self.sampled = sampled
self.logits = logits
self.search_space = search_space
self.candidate_net = candidate_net
self._genotype = None
self._discretized_arch = None
self._edge_probs = None
def set_candidate_net(self, c_net):
self.candidate_net = c_net
def plot_arch(self, filename, label="", edge_labels=None):
if edge_labels is None:
edge_labels = self.discretized_arch_and_prob[1]
return self.search_space.plot_arch(
self.genotype, filename, label=label, edge_labels=edge_labels
)
def genotype_list(self):
return list(self.genotype._asdict().items())
def parse(self, weights):
probs = softmax(self.logits)
start = 0
n = 1
num_steps = self.search_space.num_vertices
arch = [[], []]
edge_prob = []
for _ in range(1, num_steps):
end = start + n
w = weights[start:end]
prob = probs[start:end]
edges = sorted(range(n), key=lambda x: -max(w[x]))
arch[0] += edges
op_lst = [np.argmax(w[edge]) for edge in edges]
edge_prob += [
"{:.3f}".format(prob[edge][op_id]) for edge, op_id in zip(edges, op_lst)
]
arch[1] += op_lst
n += 1
start = end
num = self.search_space.num_vertices
        archs = [[0 for _ in range(num)] for _ in range(num)]
p = 0
for i in range(1, num):
for j in range(i):
archs[i][arch[0][p]] = arch[1][p]
p += 1
return np.array(archs), edge_prob
@property
def discretized_arch_and_prob(self):
if self._discretized_arch is None:
            if self.arch[0].edge_norms is None:
                weights = self.sampled
            else:
                # self.arch is a list of DiffArch entries, one per edge, so the
                # per-edge norms have to be gathered before scaling
                edge_norms = np.array(
                    [utils.get_numpy(a.edge_norms) for a in self.arch]
                )
                weights = utils.get_numpy(self.sampled) * edge_norms
self._discretized_arch, self._edge_probs = self.parse(weights)
return self._discretized_arch, self._edge_probs
@property
def genotype(self):
if self._genotype is None:
self._genotype = self.search_space.genotype(
self.discretized_arch_and_prob[0]
)
return self._genotype
def __repr__(self):
return (
"NasBench201DiffRollout(search_space={sn}, arch={arch}, "
"candidate_net={cn}, perf={perf})"
).format(
sn=self.search_space.NAME,
arch=self.arch,
cn=self.candidate_net,
perf=self.perf,
)
class NasBench201RSController(BaseController):
NAME = "nasbench-201-rs"
def __init__(
self,
search_space,
device,
rollout_type="nasbench-201",
mode="eval",
check_valid=True,
avoid_repeat=False,
fair=False,
deiso=False,
op_type=0,
pickle_file="",
text_file="",
shuffle_indices_avoid_repeat=True,
schedule_cfg=None,
):
super(NasBench201RSController, self).__init__(
search_space, rollout_type, mode, schedule_cfg
)
# get the infinite iterator of the model matrix and ops
self.mode = mode
self.num_vertices = self.search_space.num_vertices
self.cur_solution = self.search_space.random_sample_arch()
self.num_op_choices = self.search_space.num_op_choices
self.num_ops = self.search_space.num_ops
self.check_valid = check_valid
self.avoid_repeat = avoid_repeat
self.fair = fair
self.deiso = deiso
self.pickle_file = pickle_file
self.text_file = text_file
self.shuffle_indices_avoid_repeat = shuffle_indices_avoid_repeat
self.lines = None
if self.text_file:
with open(self.text_file) as rf:
self.lines = rf.readlines()
elif self.pickle_file:
with open(self.pickle_file, "rb") as rf:
self.lines = pickle.load(rf)
else:
            # if neither text_file nor pickle_file is specified,
            # assume non-isom{num op choices}.txt is under the awnas data dir
base_dir = os.path.join(utils.get_awnas_dir("AWNAS_DATA", "data"), "nasbench-201")
isom_table_fname = os.path.join(base_dir, "non-isom{}.txt".format(self.num_op_choices))
if self.deiso:
assert os.path.exists(isom_table_fname)
with open(isom_table_fname) as rf:
self.lines = rf.readlines()
if self.lines is not None:
self.arch_num = len(self.lines)
else:
self.arch_num = 15625
if self.deiso:
print("Deiso arch num: ", self.arch_num)
self.index = 0
self.indices = np.arange(self.arch_num)
if self.shuffle_indices_avoid_repeat:
np.random.shuffle(self.indices)
def random_sample_nonisom(self):
ind = np.random.randint(low=0, high=self.arch_num)
arch = self.search_space.str2matrix(self.lines[ind].strip())
return NasBench201Rollout(arch, self.search_space)
def check_valid_arch(self, arch):
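        # an arch counts as valid iff at least one convolution op sits on an
        # edge whose source is reachable from the input node and whose target
        # can reach the output node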
valid_arch = False
valid_input = [0]
for to_ in range(1, self.num_vertices):
for input_ in valid_input:
if arch[to_][input_] > 0:
valid_input.append(to_)
break
valid_output = [self.search_space.num_vertices - 1]
for from_ in range(self.search_space.num_vertices - 2, -1, -1):
for output_ in valid_output:
if arch[output_][from_] > 0:
valid_output.append(from_)
for input_ in valid_input:
for output_ in valid_output:
if (
self.search_space.ops_choices[int(arch[output_][input_])].find(
"conv"
)
!= -1
):
valid_arch = True
return valid_arch
def sample(self, n=1, batch_size=None):
rollouts = []
if self.avoid_repeat:
if self.deiso or self.num_op_choices != 5:
# assert n == self.arch_num
for i in range(n):
line = self.lines[i].strip()
rollouts.append(
NasBench201Rollout(
self.search_space.str2matrix(line), self.search_space
)
)
elif self.pickle_file:
for line in self.lines:
rollouts.append(NasBench201Rollout(line[0], self.search_space))
else:
next_index = self.index + n
# indexes = np.random.choice(np.arange(15625), size=n, replace=False)
if self.text_file:
rollouts = [NasBench201Rollout(
self.search_space.str2matrix(self.lines[self.indices[i]].strip()),
self.search_space)
for i in range(self.index, min(next_index, 15625))]
else:
rollouts = [NasBench201Rollout(
self.search_space.api.str2matrix(
self.search_space.api.query_by_index(self.indices[i]).arch_str
),
self.search_space,
) for i in range(self.index, min(next_index, 15625))]
if next_index >= 15625:
# reshuffle the indices
if self.shuffle_indices_avoid_repeat:
np.random.shuffle(self.indices)
next_index = next_index - 15625
if self.text_file:
rollouts += [NasBench201Rollout(
self.search_space.str2matrix(self.lines[self.indices[i]].strip()),
self.search_space)
for i in range(0, next_index)]
else:
rollouts += [NasBench201Rollout(
self.search_space.api.str2matrix(
self.search_space.api.query_by_index(self.indices[i]).arch_str
),
self.search_space)
for i in range(0, next_index)]
self.index = next_index
return rollouts
if self.fair:
assert n == self.num_op_choices
archs = np.zeros([self.num_op_choices,
self.search_space.num_vertices,
self.search_space.num_vertices])
ops = np.array([
np.random.permutation(np.arange(self.num_op_choices))
for _ in range(self.num_ops)
]).T
for i in range(self.num_op_choices):
archs[i][self.search_space.idx] = ops[i]
rollouts = [NasBench201Rollout(arch, self.search_space) for arch in archs
if self.check_valid_arch(arch) or not self.check_valid]
return rollouts
for i in range(n):
            while True:
if self.deiso:
new_rollout = self.random_sample_nonisom()
elif self.pickle_file:
new_rollout = NasBench201Rollout(
self.lines[np.random.randint(0, len(self.lines))][0],
self.search_space,
)
else:
new_rollout = self.search_space.random_sample()
if self.check_valid_arch(new_rollout.arch) or not self.check_valid:
rollouts.append(new_rollout)
break
return rollouts
@classmethod
def supported_rollout_types(cls):
return ["nasbench-201"]
    # ---- APIs that are not necessary ----
def set_mode(self, mode):
self.mode = mode
def step(self, rollouts, optimizer, perf_name):
pass
def set_device(self, device):
pass
def summary(self, rollouts, log=False, log_prefix="", step=None):
pass
def save(self, path):
pass
def load(self, path):
self.logger.info("nasbench-201-rs controller would not be loaded from the disk")
class GCN(nn.Module):
def __init__(self, num_vertices, layers, size):
super(GCN, self).__init__()
self.gcns = []
for i in range(layers):
self.gcns.append(
DenseGraphConvolution(
in_features=size,
out_features=size,
plus_I=False,
normalize=False,
bias=False,
)
)
self.gcns = nn.ModuleList(self.gcns)
self.layers = layers
self.num_vertices = num_vertices
def forward(self, x):
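        # build a fixed DAG adjacency: node i receives messages from every
        # earlier node j with weight 1/(j+1), plus an identity self-loop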
adj = np.zeros((self.num_vertices, self.num_vertices), dtype=np.float32)
for i in range(self.num_vertices):
for j in range(i):
adj[j][i] = 1.0 / (j + 1)
adj = (torch.from_numpy(adj) + torch.eye(self.num_vertices, dtype=torch.float32)).cuda()
out = x
for i in range(self.layers):
out = self.gcns[i](out, adj)
if i != self.layers - 1:
out = F.relu(out)
return out
class MLP(nn.Module):
def __init__(self, num_vertices, layers, size):
super(MLP, self).__init__()
self.num_vertices = num_vertices
self.net = []
for i in range(1, layers + 1):
self.net.append(nn.Linear(size[i - 1], size[i]))
self.net = nn.ModuleList(self.net)
self.layers = layers
def forward_single(self, x):
out = x
for i in range(self.layers):
out = self.net[i](out)
if i != self.layers - 1:
out = F.relu(out)
return out
def forward(self, x):
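        # score every ordered node pair (j -> i with j < i); each pairwise MLP
        # output is a vector of per-op logits for that edge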
prob = []
for i in range(self.num_vertices):
for j in range(i):
out = self.forward_single(torch.cat([x[j], x[i]]))
prob.append(out)
return prob
class NasBench201DiffController(DiffController, nn.Module):
"""
Differentiable controller for nasbench-201.
"""
NAME = "nasbench-201-differentiable"
SCHEDULABLE_ATTRS = [
"gumbel_temperature",
"entropy_coeff",
"force_uniform"
]
def __init__(self, search_space: SearchSpace, device: torch.device,
rollout_type: str = "nasbench-201-differentiable",
use_prob: bool = False, gumbel_hard: bool = False,
gumbel_temperature: float = 1.0, entropy_coeff: float = 0.01,
max_grad_norm: float = None, force_uniform: bool = False,
inspect_hessian_every: int = -1, schedule_cfg = None):
BaseController.__init__(self, search_space, rollout_type, schedule_cfg = schedule_cfg)
nn.Module.__init__(self)
self.device = device
# sampling
self.use_prob = use_prob
self.gumbel_hard = gumbel_hard
self.gumbel_temperature = gumbel_temperature
# training
self.entropy_coeff = entropy_coeff
self.max_grad_norm = max_grad_norm
self.force_uniform = force_uniform
self.inspect_hessian_every = inspect_hessian_every
self.inspect_hessian = False
self.cg_alpha = nn.Parameter(1e-3 *
torch.randn(self.search_space.num_possible_edges, self.search_space.num_op_choices)
)
# meta learning related
self.params_clone = None
self.buffers_clone = None
self.grad_clone = None
self.grad_count = 0
self.to(self.device)
def sample(self, n: int = 1, batch_size: int = None):
assert batch_size is None or batch_size == 1, "Do not support sample batch size for now"
rollouts = []
for _ in range(n):
alpha = torch.zeros_like(self.cg_alpha) if self.force_uniform else self.cg_alpha
if self.use_prob:
sampled = F.softmax(alpha / self.gumbel_temperature, dim = -1)
else:
# gumbel sampling
sampled, _ = utils.gumbel_softmax(alpha, self.gumbel_temperature, hard = False)
op_weights_list = utils.straight_through(sampled) if self.gumbel_hard else sampled
sampled_list = utils.get_numpy(sampled)
logits_list = utils.get_numpy(alpha)
arch_list = [
DiffArch(op_weights = op_weights, edge_norms = None)
for op_weights in op_weights_list
]
rollouts.append(
NasBench201DiffRollout(
arch_list, sampled_list, logits_list, self.search_space
)
)
return rollouts
def _entropy_loss(self):
if self.entropy_coeff is not None:
prob = F.softmax(self.cg_alpha, dim = -1)
return - self.entropy_coeff * (torch.log(prob) * prob).sum()
return 0.
def summary(self, rollouts, log: bool = False, log_prefix: str = "", step: int = None):
num = len(rollouts)
logits_list = [[utils.get_numpy(logits) for logits in r.logits] for r in rollouts]
if self.gumbel_hard:
cg_logprob = 0.
cg_entro = 0.
for rollout, logits in zip(rollouts, logits_list):
prob = utils.softmax(logits)
logprob = np.log(prob)
if self.gumbel_hard:
op_weights = [arch.op_weights.tolist() for arch in rollout.arch]
inds = np.argmax(utils.get_numpy(op_weights), axis=-1)
cg_logprob += np.sum(logprob[range(len(inds)), inds])
cg_entro += -(prob * logprob).sum()
# mean across rollouts
if self.gumbel_hard:
cg_logprob /= num
cg_logprobs_str = "{:.2f}".format(cg_logprob)
cg_entro /= num
cg_entro_str = "{:.2f}".format(cg_entro)
if log:
# maybe log the summary
            self.logger.info("%s%d rollouts: %s ENTROPY: %.2f (%s)",
                             log_prefix, num,
                             "-LOG_PROB: %.2f (%s) ;" % (-cg_logprob, cg_logprobs_str) \
                             if self.gumbel_hard else "",
                             cg_entro, cg_entro_str)
if step is not None and not self.writer.is_none():
if self.gumbel_hard:
self.writer.add_scalar("log_prob", cg_logprob, step)
self.writer.add_scalar("entropy", cg_entro, step)
stats = [("ENTRO", cg_entro)]
if self.gumbel_hard:
stats += [("LOGPROB", cg_logprob)]
return OrderedDict(stats)
@classmethod
def supported_rollout_types(cls):
return ["nasbench-201-differentiable"]
class NasBench201GcnController(BaseController, nn.Module):
"""
Implementation following Neural Graph Embedding for Neural Architecture Search, AAAI 2020
"""
NAME = "nasbench-201-gcn-differentiable"
def __init__(
self,
search_space,
device="cuda",
mode="val",
rollout_type="nasbench-201-differentiable",
embed_size=10,
gcn_layers=5,
mlp_layers=3,
mlp_size=[15, 10],
use_prob=False,
gumbel_hard=False,
gumbel_temp=1.0,
use_edge_norm=False,
entropy_coeff=0.01,
max_grad_norm=None,
force_uniform=False,
inspect_hessian_every=-1,
schedule_cfg=None,
):
super(NasBench201GcnController, self).__init__(
search_space, rollout_type, mode, schedule_cfg
)
nn.Module.__init__(self)
self.num_vertices = self.search_space.num_vertices
self.embed_size = embed_size
self.node_embed = nn.Parameter(
1e-3 * torch.randn(self.num_vertices, self.embed_size)
)
self.gcn_layers = gcn_layers
self.mlp_layers = mlp_layers
self.mlp_size = (
[self.embed_size * 2] + mlp_size + [self.search_space.num_op_choices]
)
self.gcn = GCN(self.num_vertices, self.gcn_layers, self.embed_size)
self.mlp = MLP(self.num_vertices, self.mlp_layers, self.mlp_size)
self.prob = None
self.use_prob = use_prob
self.gumbel_hard = gumbel_hard
self.gumbel_temp = gumbel_temp
self.use_edge_norm = use_edge_norm
self.entropy_coeff = entropy_coeff
self.max_grad_norm = max_grad_norm
self.force_uniform = force_uniform
self.inspect_hessian_every = inspect_hessian_every
self.inspect_hessian = False
self.device = device
self.mode = mode
self.set_device(device)
self.set_mode(mode)
def on_epoch_start(self, epoch):
super(NasBench201GcnController, self).on_epoch_start(epoch)
if self.inspect_hessian_every >= 0 and epoch % self.inspect_hessian_every == 0:
self.inspect_hessian = True
def set_mode(self, mode):
self.mode = mode
def set_device(self, device):
self.device = device
self.to(torch.device(device))
def get_prob(self):
prob = self.gcn(self.node_embed)
prob = self.mlp(prob)
return prob
def forward(self, n=1):
return self.sample(n=n)
def sample(self, n=1, batch_size=None):
assert batch_size is None or batch_size == 1, "Do not support sample batch size for now"
self.probs = self.get_prob()
rollouts = []
for _ in range(n):
op_weights_list = []
sampled_list = []
logits_list = []
for prob in self.probs:
if self.force_uniform:
prob = torch.zeros_like(prob)
if self.use_prob:
sampled = F.softmax(prob / self.gumbel_temp, dim=-1)
else:
sampled, _ = utils.gumbel_softmax(
prob, self.gumbel_temp, hard=False
)
if self.gumbel_hard:
op_weights = utils.straight_through(sampled)
else:
op_weights = sampled
op_weights_list.append(op_weights)
sampled_list.append(utils.get_numpy(sampled))
logits_list.append(utils.get_numpy(prob))
arch_list = [
DiffArch(op_weights=op_weights, edge_norms=None)
for op_weights in op_weights_list
]
rollouts.append(
NasBench201DiffRollout(
arch_list, sampled_list, logits_list, self.search_space
)
)
return rollouts
def save(self, path):
torch.save({"epoch": self.epoch, "state_dict": self.state_dict()}, path)
self.logger.info("Saved controller network to %s", path)
def load(self, path):
checkpoint = torch.load(path, map_location=torch.device("cpu"))
self.load_state_dict(checkpoint["state_dict"])
self.on_epoch_start(checkpoint["epoch"])
self.logger.info("Loaded controller network from %s", path)
def _entropy_loss(self):
if self.entropy_coeff is not None:
probs = [F.softmax(prob, dim=-1) for prob in self.probs]
return self.entropy_coeff * sum(
-(torch.log(prob) * prob).sum() for prob in probs
)
return 0
def gradient(self, loss, return_grads=True, zero_grads=True):
if zero_grads:
self.zero_grad()
_loss = loss + self._entropy_loss()
_loss.backward()
if return_grads:
return utils.get_numpy(_loss), [
(k, v.grad.clone()) for k, v in self.named_parameters()
]
return utils.get_numpy(_loss)
def step_current_gradient(self, optimizer):
if self.max_grad_norm is not None:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
optimizer.step()
def step_gradient(self, gradients, optimizer):
self.zero_grad()
named_params = dict(self.named_parameters())
for k, grad in gradients:
named_params[k].grad = grad
if self.max_grad_norm is not None:
            torch.nn.utils.clip_grad_norm_(self.parameters(), self.max_grad_norm)
optimizer.step()
def step(self, rollouts, optimizer=None, perf_name="reward"):
self.zero_grad()
losses = [r.get_perf(perf_name) for r in rollouts]
        for l in losses:
            l.backward()
optimizer.step()
return np.mean([l.detach().cpu().numpy() for l in losses])
def summary(self, rollouts, log=False, log_prefix="", step=None):
return None
@classmethod
def supported_rollout_types(cls):
return ["nasbench-201-differentiable"]
class NasBench201EvoController(BaseController):
NAME = "nasbench-201-evo"
def __init__(
self,
search_space,
device,
rollout_type="nasbench-201",
mode="eval",
population_nums=100,
schedule_cfg=None,
):
super(NasBench201EvoController, self).__init__(
search_space, rollout_type, mode, schedule_cfg
)
# get the infinite iterator of the model matrix and ops
self.mode = mode
self.num_vertices = self.search_space.num_vertices
self.cur_solution = self.search_space.random_sample_arch()
self.population_nums = population_nums
self.population = collections.OrderedDict()
self.num_arch = len(self.search_space.api)
population_ind = np.random.choice(
np.arange(self.num_arch), size=self.population_nums, replace=False
)
for i in range(self.population_nums):
arch_res = self.search_space.api.query_by_index(population_ind[i])
accs = (
np.mean(
[
res.eval_acc1es["ori-test@199"]
for res in arch_res.query("cifar10").values()
]
)
/ 100.0
)
self.population[arch_res.arch_str] = accs
def reinit(self):
population_ind = np.random.choice(
np.arange(self.num_arch), size=self.population_nums, replace=False
)
for i in range(self.population_nums):
arch_res = self.search_space.api.query_by_index(population_ind[i])
accs = (
np.mean(
[
res.eval_acc1es["ori-test@199"]
for res in arch_res.query("cifar10").values()
]
)
/ 100.0
)
self.population[arch_res.arch_str] = accs
def set_init_population(self, rollout_list, perf_name):
# clear the current population
self.population = collections.OrderedDict()
for r in rollout_list:
self.population[r.genotype] = r.get_perf(perf_name)
def sample(self, n, batch_size=None):
assert batch_size is None
new_archs = sorted(self.population.items(), key=lambda x: x[1], reverse=True)
if self.mode == "eval":
best_sets = []
for n_r in range(n):
best_sets.append(
NasBench201Rollout(
self.search_space.api.str2matrix(new_archs[n_r][0]),
self.search_space,
)
)
return best_sets
rollouts = []
for n_r in range(n):
try_times = 0
while True:
rand_ind = np.random.randint(0, self.search_space.idx[0].shape[0])
neighbor_choice = np.random.randint(0, self.search_space.num_op_choices)
arch_mat = self.search_space.api.str2matrix(new_archs[n_r][0])
while (
neighbor_choice
== arch_mat[
self.search_space.idx[0][rand_ind],
self.search_space.idx[1][rand_ind],
]
):
neighbor_choice = np.random.randint(
0, self.search_space.num_op_choices
)
new_choice = copy.deepcopy(arch_mat)
new_choice[
self.search_space.idx[0][rand_ind],
self.search_space.idx[1][rand_ind],
] = neighbor_choice
try_times += 1
if self.search_space.genotype(new_choice) not in self.population.keys():
break
rollouts.append(NasBench201Rollout(new_choice, self.search_space))
return rollouts
@classmethod
def supported_rollout_types(cls):
return ["nasbench-201"]
def step(self, rollouts, optimizer, perf_name):
best_rollout = rollouts[0]
for r in rollouts:
if r.get_perf(perf_name) > best_rollout.get_perf(perf_name):
best_rollout = r
self.population.pop(list(self.population.keys())[0])
self.population[best_rollout.genotype] = best_rollout.get_perf(perf_name)
return 0
    # ---- APIs that are not necessary ----
def set_mode(self, mode):
self.mode = mode
def set_device(self, device):
pass
def summary(self, rollouts, log=False, log_prefix="", step=None):
pass
def save(self, path):
pass
def load(self, path):
pass
class NasBench201SAController(BaseController):
NAME = "nasbench-201-sa"
def __init__(
self,
search_space,
device,
rollout_type="nasbench-201",
mode="eval",
temperature=1000,
anneal_coeff=0.98,
schedule_cfg=None,
):
super(NasBench201SAController, self).__init__(
search_space, rollout_type, mode, schedule_cfg
)
# get the infinite iterator of the model matrix and ops
self.num_vertices = self.search_space.num_vertices
self.temperature = temperature
self.anneal_coeff = anneal_coeff
# random sample as the init arch
self.cur_solution = self.search_space.random_sample_arch()
self.cur_perf = None
def reinit(self):
# random sample as the init arch
self.cur_solution = self.search_space.random_sample_arch()
self.cur_perf = None
def set_init_population(self, rollout_list, perf_name):
# set the initialization to the best rollout in the list
perf_list = [r.get_perf(perf_name) for r in rollout_list]
best_rollout = rollout_list[np.argmax(perf_list)]
self.cur_solution = best_rollout.arch
self.cur_perf = best_rollout.get_perf(perf_name)
self.logger.info(
"Set the initialization rollout: {}; perf: {}".format(
best_rollout, self.cur_perf
)
)
def sample(self, n, batch_size=None):
assert batch_size is None
if self.mode == "eval":
return [NasBench201Rollout(self.cur_solution, self.search_space)] * n
rollouts = []
for n_r in range(n):
            rand_ind = np.random.randint(0, self.search_space.idx[0].shape[0])
import pandas as pd
import numpy as np
from pria_lifechem.evaluation import precision_auc_single, roc_auc_single, bedroc_auc_single, \
enrichment_factor_single, normalized_enrichment_factor_single
from pria_lifechem.function import reshape_data_into_2_dim
from sklearn import metrics
function_mapping = {'precision_auc_single': precision_auc_single,
'roc_auc_single': roc_auc_single,
'bedroc_auc_single': bedroc_auc_single}
# scoring columns evaluated below; this final list supersedes an earlier draft
# that also included consensus_dockscore_STD and consensus_dockscore_min
docking_methods = ['consensus_bcs_efr1_opt', 'consensus_bcs_rocauc_opt',
                   'consensus_dockscore_max', 'consensus_dockscore_mean', 'consensus_dockscore_median',
                   'dockscore_ad4', 'dockscore_dock6', 'dockscore_fred', 'dockscore_hybrid',
                   'dockscore_plants', 'dockscore_rdockint', 'dockscore_rdocktot',
                   'dockscore_smina', 'dockscore_surflex']
def get_auc_table(file_path, target_name, auc_list, auc_header, title):
pria_pd = pd.read_csv(file_path)
title = '## {}'.format(title)
header = '| docking method |'
for name in auc_header:
header = '{} {} |'.format(header, name)
splitter = '| --- |'
for _ in auc_header:
splitter = '{} {} |'.format(splitter, '---')
content = ''
    if target_name in ('Keck_Pria_AS_Retest', 'Keck_Pria_FP_data'):
        ground = '../../output/docking/stage_1/lc123-pria-dockdata-qnorm.csv.gz'
    elif target_name == 'Keck_RMI_cdd':
        ground = '../../output/docking/stage_1/lc123-rmi-dockdata-qnorm.csv.gz'
    else:
        raise ValueError('Target name {} not found.'.format(target_name))
ground_pd = pd.read_csv(ground)
ground_pd = ground_pd[['Unnamed: 0', target_name]]
ground_pd.columns = ['molid', target_name]
pria_pd = pd.merge(pria_pd, ground_pd, on='molid', how='outer')
for docking_method in docking_methods:
# temp_pd = pria_pd[['Unnamed: 0', target_name, docking_method]]
temp_pd = pria_pd[['molid', target_name, docking_method]]
filtered_pd = temp_pd.dropna()
true_label_list = filtered_pd[target_name].tolist()
docking_ranked_list = filtered_pd[docking_method].tolist()
true_label_array = reshape_data_into_2_dim(np.array(true_label_list))
        docking_ranked_array = reshape_data_into_2_dim(np.array(docking_ranked_list))
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Modelagem em tempo real | COVID-19 no Brasil
--------------------------------------------
Ideias e modelagens desenvolvidas pela trinca:
. <NAME>
. <NAME>
. <NAME>
Esta modelagem possui as seguintes características:
a) NÃO seguimos modelos paramétricos => Não existem durante a epidemia dados
suficientes ou confiáveis para alimentar modelos epidemiológicos como a excelente
calaculadora http://gabgoh.github.io/COVID/index.html (ela serve para gerar cená-
rios e para modelar a epidemia DEPOIS que ela passar). Além disso, a natureza
exponencial das curvas as torna extremamente sensíveis aos parâmetros que a defi-
nem. Isso faz com que a confiabilidade preditiva desses modelos seja ilusória.
b) A evolução epidemia no Brasil começou depois da de outros países. Nossa mode-
lagem se apoia nesse fato. Com os dados disponíveis, procuramos no instante pre-
sente determinar quem estamos seguindo, ou seja, que países mais se pareceram
conosco passado o mesmo período de disseminação. A partir do que aconteceu nesses
países projetamos o que pode acontecer aqui.
c) Esta conta é refeita dia a dia. Dependendo de nossa competência em conter ou
não a disseminação do Covid-19 nos aproximaremos dos países que melhor ou pior
lidaram com a epidemia e a projeção refletirá essa similaridade.
d) As decisões de modelagem são indicadas no código com os zoinhos: # ◔◔ {...}
São pontos de partida para discutir a modelagem e propor alternativas.
"""
import datetime
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
sns.set()
# in ipython, run this command before executing => %matplotlib osx
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
__author__ = "<NAME>" # code
__copyright__ = "Copyright 2020"
__license__ = "New BSD License"
__version__ = "1.5.2"
__email__ = "<EMAIL>"
__status__ = "Experimental"
def preparar_dados(p1, uf="SP", cidade=u"São Paulo"):
u"""Busca dados e organiza tabela "data" com os dados de referência para a
modelagem.
Fontes:
. Mundo: https://covid.ourworldindata.org
. Brasil: https://brasil.io
Retorna:
raw <DataFrame> | Série completa do número de mortes/dia por país, sem trans-
posição temporal
inicio <Series> | Referência dos indexes em raw para justapor o início das
curvas dos diferentes países
data <DataFrame> | Série de número de mortes/dia por país trazendo para o
zero (index 0) o primeiro dia em que ocorrem pelo menos p1 mortes
(ver macro parâmetros). Isto reduz a quantidade de países para o grupo
que está à frente ou pareado ao Brazil. A partir do index 0 é possível
comparar a evolução dos casos entre os países.
nbr <int> | Número de dias da série de dados para o Brasil
"""
    # ◔◔ {we use daily deaths, as they seem to be the most reliable data}
    raw = pd.read_csv("https://covid.ourworldindata.org/data/ecdc/new_deaths.csv").fillna(0.0)
    # ◔◔ {the link below loads cumulative deaths; we don't use it because the running sum smooths the series}
    # raw_soma = pd.read_csv("https://covid.ourworldindata.org/data/ecdc/total_deaths.csv").fillna(0.0)
    # tempo = raw['date'] # ◔◔ {we don't use the dates}
raw = raw.drop(columns='date')
raw = raw.drop(columns='World')
    # also keep the "official" (unadjusted) data
    para_oficial = raw['Brazil']
    # under-reporting correction for Brazil:
    sub, hip = estimar_subnotificacao('Brasil')
    p4br = ((sub + raw['Brazil'].sum()) / raw['Brazil'].sum())
    raw['Brasil'] = raw['Brazil'] * p4br
    # the subs dict uses the same refs as keys => for reporting in the plots
    subs = {"Brasil": str(round(p4br, 1)) + " (" + hip + ")"}
    # build the base for the "data" table
    inicio = raw.ge(p1).idxmax() # ◔◔ {find the index at which each country reaches p1}
data = pd.DataFrame({'Brasil':raw['Brasil'][inicio['Brasil']:]}).reset_index().drop(columns='index')
nbr = data.shape[0]
oficial = pd.DataFrame({'Brasil':para_oficial[inicio['Brasil']:]}).reset_index().drop(columns='index')
    # Brazil data
estados = [
'AC', 'AL', 'AP', 'AM', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MT', 'MS',
'MG', 'PA', 'PB', 'PR', 'PE', 'PI', 'RJ', 'RN', 'RS', 'RO', 'RR', 'SC',
'SP', 'SE', 'TO',
]
if uf not in estados or type(uf) is not str:
uf = "SP"
print(uf, u": UF inválida, usando 'SP'")
    # ◔◔ {we already download it filtered by uf, but other states can be used}
    uf_data = pd.read_csv("https://brasil.io/dataset/covid19/caso?state="+uf+"&format=csv")
    # add the uf data
uf_select = uf_data.loc[lambda df: df['place_type'] == "state", :]
uf_mortes = list(uf_select['deaths'].head(nbr + 1).fillna(0.0))
uf_mortes = [uf_mortes[i] - uf_mortes[i+1] for i in range(len(uf_mortes)-1)]
    uf_mortes += [0 for _ in range(nbr-len(uf_mortes))] # fix the length
uf_mortes.reverse()
oficial[uf] = pd.Series(uf_mortes).values
sub_uf, hip_uf = estimar_subnotificacao(uf)
p4uf = ((sub_uf + pd.Series(uf_mortes).values.sum())/pd.Series(uf_mortes).values.sum())
data[uf] = pd.Series(uf_mortes).values * p4uf
subs[uf] = str(round(p4uf, 1)) + " (" + hip_uf + ")"
    # add the city data
cidade_select = uf_data.loc[lambda df: df['city'] == cidade, :]
if cidade_select.shape[0] > 0:
cidade_mortes = list(cidade_select['deaths'].head(nbr + 1).fillna(0.0))
cidade_mortes = [cidade_mortes[i] - cidade_mortes[i+1] for i in range(len(cidade_mortes)-1)]
        cidade_mortes += [0 for _ in range(nbr-len(cidade_mortes))] # fix the length
cidade_mortes.reverse()
if sum(cidade_mortes):
            # under-reporting for the city => approximated by the state's
oficial[cidade] = pd.Series(cidade_mortes).values
data[cidade] = pd.Series(cidade_mortes).values * p4uf
subs[cidade] = str(round(p4uf, 1)) + " (" + hip_uf + ")"
else:
subs["n/d"] = ""
print(u"AVISO: a cidade " + cidade + " não possui mortes confirmadas")
else:
subs["n/d"] = ""
print(u"AVISO: a cidade " + cidade + " não consta nos dados para esta UF")
print(u'Utilize uma das cidades disponíveis para o terceiro gráfico:')
for d in set(uf_data['city']):
print(d)
    refs = list(subs.keys()) # the valid references...
    # add the data of the countries ahead of or level with Brazil
for k in inicio.keys():
if k == "Brasil": continue
if inicio[k] == 0 or inicio[k] > inicio["Brasil"]: continue
C = raw[k][inicio[k]:inicio[k]+nbr]
data[k] = C.values
return raw, inicio, data, nbr, subs, refs, oficial
def rodar_modelo(raw, inicio, data, nbr, p2, p3, ref, refs):
"""
Usa os dados preparados para gerar dados para visualização e a projeção da
evoluação da epidemia.
Retorna:
correlacionados <list>: Países mais correlacionados, usados para a projeção
calibrados <DataFrame>: Série alisada de mortes por dia com dados de ref e
países correlacionados
projetado <Array>: Série estimada para a evoluação da epidemia em ref
infos <dict>: informações sobre o pico estimado da epidemia
"""
    # ◔◔ {We chose not to smooth the data before computing the correlation. We
    # know the reporting quality varies, but we assume the noise is random, so
    # no smoothing is needed for the correlation to be valid. On the contrary,
    # the "raw" correlation is the most credible one}
    # ◔◔ {but if you think it is worth smoothing first, use the code below}
    # smoothing of the reported death cases (moving average)
    # data = data.rolling(5).mean()
try: data = data.drop(columns='Brazil')
except: pass
    # compute the correlation matrix:
    pearson = data.corr()
    # ◔◔ {the method's default is Pearson correlation, cf. the ref below}
    # https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.corr.html
    # ◔◔ {do not include the local cases, to avoid endogeneity}
    out = refs # do not mix with the other local cuts
    # select the p2 countries that correlate best with ref
    correlacionados = [_ for _ in pearson[ref].sort_values(ascending=False).keys() if _ not in out][:p2]
    # create the table, starting with the ref data
    calibrados = pd.DataFrame({ref:data[ref]})
    # fill it in with the data of the correlated countries
    for k in correlacionados:
        # ◔◔ {take the data from raw, since from here on we use all available data for the country}
        C = raw[k][inicio[k]:]
        additional = pd.DataFrame({k: C.values}) # array
        calibrados = pd.concat([calibrados, additional], axis=1)
    # ◔◔ {here we apply a p3-day smoothing to improve the visualization}
    calibrados = calibrados.rolling(p3).mean()
    # ◔◔ {the projection uses the smoothed data}
    # ◔◔ {how the projection is made:
    # 1. each correlated country gets a weight, proportional to how correlated it is
    # .. the weights sum to 1
    # .. the more correlated, the larger the weight }
    pesos = [pearson[ref][c] for c in correlacionados] # better corr weighs more
    pesos = [pesos[i]/sum(pesos) for i in range(len(pesos))] # normalized weights
    pesos = dict(zip(correlacionados, pesos)) # in a dict, for convenience
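    # illustrative example (not from the original): correlations
    # {A: 0.98, B: 0.96} yield weights {A: ~0.505, B: ~0.495}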
    # proj <list>: will end up with the length of the longest series in calibrados
    proj = [np.nan for _ in range(nbr)] # starts with nan where we already have the ref data
    proj[-1] = calibrados[ref][nbr - 1] # the first value coincides with the last of ref
    # the projection starts from that point onward
    # ◔◔ {the projection follows, day by day, the variations of the correlated countries}
    for d in range(nbr, calibrados.shape[0]):
        x = 0 # estimated increment for the day
        for c in correlacionados:
            if not np.isnan(calibrados[c][d]):
                # add the country's % increment, weighted by its weight
                x += (calibrados[c][d]/calibrados[c][d-1]) * pesos[c]
            else:
                # ◔◔ {when a country's data run out, it stops influencing the rate}
                x += 1 * pesos[c]
            # print(d, c, x)
        # the projection series is built by applying the estimated increment to the previous day
        proj.append(proj[-1] * x)
    # projetado <Array>
    projetado = np.array(proj)
# coding=utf-8 python3.6
# ================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
# license : MIT License
# Author : haibingshuai
# Created date: 2019/10/29 15:19
# Description :
# ================================================================
import tensorflow as tf
import numpy as np
import cv2
import random
import colorsys
from core.model_config import config
def bbox_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
    boxes2 = np.array(boxes2)
# coding=utf-8
# Copyright 2021 The SLOE Logistic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run experiment to understand coverage of CIs generated by SLOE.
Tests the SLOE estimator empirically by computing
confidence intervals (CIs) using it over a bunch of different seeds and aspect
ratios, calculating properties such as coverage and size, and storing in csv
files to be analyzed in a colab.
"""
from absl import app
from absl import flags
import apache_beam as beam
from apache_beam.options import pipeline_options
import numpy as np
import sklearn.linear_model
from sklearn.model_selection import LeaveOneOut
from sloe_logistic import probe_frontier
from sloe_logistic import unbiased_logistic_regression
import sloe_logistic.sloe_experiments.experiment_helpers as exp_helper
GAMMA_RANGE = [0.1, 1, 5]
FLAGS = flags.FLAGS
flags.DEFINE_integer('num_sims', 100, 'number of simulations to run')
flags.DEFINE_string('output_path', '/tmp/counts', 'The output file path')
flags.DEFINE_enum(
'coverage_target', 'true_preds', ['true_preds', 'calib_ests', 'reg_ests'],
'Which value to check coverage in prediction intervals?')
flags.DEFINE_boolean('include_bootstrap', False,
'Include bootstrap CIs as well? These are slow.')
flags.DEFINE_float(
'kappa_spacing', 0.05,
'Resolution of graph in terms of spacing between kappa evaluated.')
flags.DEFINE_float(
'coverage_rate', 95, 'What level confidence intervals'
'should be tested (0-100)?')
def run_sim(params):
"""Runs simulation and computes properties of the estimated CIs."""
kappa = params[0]
gamma = params[1]
seed = 201216 + params[2]
sim_params = exp_helper.SimulationParams.create_from_flags()
sim_params.seed = seed
sim_params.gamma = np.sqrt(gamma)
sim_params.p = int(sim_params.training_n * kappa)
sim = exp_helper.create_sim(sim_params)
x1, y1 = sim.sample()
pfr = probe_frontier.ProbeFrontierLogisticRegression()
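  # Under linear separability the logistic MLE does not exist (the likelihood
  # has no finite maximizer), so this draw cannot be analyzed and is skipped.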
if pfr.is_separable(x1, y1):
return
# Draw test data
x2, _ = sim.sample(int(sim_params.training_n / 4))
true_logits = x2.dot(sim.beta)
bias_selector = np.abs(true_logits) > 1e-2
# Calculate coverage
if FLAGS.coverage_target == 'true_preds':
target = 1.0 / (1.0 + np.exp(-true_logits)).reshape(-1)
elif FLAGS.coverage_target == 'calib_ests':
ps_logit_model = unbiased_logistic_regression.PlattScaledLogisticRegression(
fit_intercept=sim_params.intercept or sim_params.uncentered)
ps_logit_model.fit(x1, y1)
target = ps_logit_model.predict_proba(x2)[:, 1]
elif FLAGS.coverage_target == 'reg_ests':
ps_logit_model = sklearn.linear_model.LogisticRegressionCV(
cv=LeaveOneOut(),
fit_intercept=False,
Cs=20,
penalty='l2',
solver='newton-cg')
ps_logit_model.fit(x1, y1)
target = ps_logit_model.predict_proba(x2)[:, 1]
else:
raise ValueError("Invalid choice of coverage target '{}'.".format(
FLAGS.coverage_target))
try:
new_method_model = exp_helper.create_inference_model('newmethod')
new_method_model.set_coverage(FLAGS.coverage_rate)
_ = new_method_model.fit(x1, y1)
new_pred_int = new_method_model.prediction_intervals(x2)
new_logit_int = new_method_model.prediction_intervals(x2, logit=True)
except ValueError as e:
print(e)
return
std_method_model = exp_helper.create_inference_model('mle')
std_method_model.set_coverage(FLAGS.coverage_rate)
_ = std_method_model.fit(x1, y1)
std_pred_int = std_method_model.prediction_intervals(x2)
std_logit_int = std_method_model.prediction_intervals(x2, logit=True)
new_coverage = np.logical_and(
new_pred_int[:, 0].reshape(-1) <= target,
target <= new_pred_int[:, 2].reshape(-1)).astype(float)
std_coverage = np.logical_and(
std_pred_int[:, 0].reshape(-1) <= target,
target <= std_pred_int[:, 2].reshape(-1)).astype(float)
new_width = np.abs(new_logit_int[:, 2] - new_logit_int[:, 0])
std_width = np.abs(std_logit_int[:, 2] - std_logit_int[:, 0])
new_bias = new_logit_int[bias_selector, 1] / true_logits[bias_selector]
std_bias = std_logit_int[bias_selector, 1] / true_logits[bias_selector]
results = [
gamma, kappa, seed,
      np.mean(new_coverage),
import re
import pandas as pd
import numpy as np
import pathlib
from collections import OrderedDict
from pyutil import read_table, intersection
BASE_PAIR = {
'A': 'T',
'T': 'A',
'G': 'C',
'C': 'G'
}
def check_flip(a1, a2, b1, b2):
res = []
for _a1, _a2, _b1, _b2 in zip(a1, a2, b1, b2):
res.append(_check_flip(_a1, _a2, _b1, _b2))
return np.array(res)
def _check_flip(a0, a1, b0, b1):
'''
check if (a0, a1) and (b0, b1) are of the same direction.
If there is nan or they don't match at all or ambiguious return nan
Else if they are in the same direction, return 1
Else return -1
'''
if a0 is np.nan or a1 is np.nan or b0 is np.nan or b1 is np.nan:
return np.nan
# remove ambiguious first.
if a0 == BASE_PAIR[a1] or b0 == BASE_PAIR[b1]:
return np.nan
# exact match
if a0 == b0 and a1 == b1:
return 1
# flip
if a0 == b1 and a1 == b0:
return -1
# compliment match
if a0 == BASE_PAIR[b0] and a1 == BASE_PAIR[b1]:
return 1
# compliment flip
if a0 == BASE_PAIR[b1] and a1 == BASE_PAIR[b0]:
return -1
# if all above does not return, it has to be invalid.
return np.nan
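# Illustrative behavior of _check_flip (examples added for clarity, not part
# of the original file):
#   _check_flip('A', 'G', 'A', 'G') -> 1    (same direction)
#   _check_flip('A', 'G', 'G', 'A') -> -1   (alleles flipped)
#   _check_flip('A', 'T', 'A', 'T') -> nan  (strand-ambiguous A/T pair)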
def rearrage_df_by_target(df, target, df_value_cols):
df_res = target[['snpid', 'chr', 'effect_allele', 'non_effect_allele']]
df_res = pd.merge(
df_res, df,
on=['snpid', 'chr'],
suffixes=['_res', '_df'],
how='left'
)
flip_factor = check_flip(
a1=df_res.effect_allele_res,
a2=df_res.non_effect_allele_res,
b1=df_res.effect_allele_df,
b2=df_res.non_effect_allele_df
)
# we need to carry the missingness when we move on
with np.errstate(invalid='ignore'):
df_res[df_value_cols] = df_res[df_value_cols] * flip_factor[:, np.newaxis]
df_res.drop(
columns=['effect_allele_df', 'non_effect_allele_df'], inplace=True
)
df_res.rename(
columns={
'effect_allele_res': 'effect_allele',
'non_effect_allele_res': 'non_effect_allele'
},
inplace=True
)
return df_res
def harmonize_gwas_and_weight(gwas, weight):
'''
Harmonize GWAS to weight SNP set.
But only keep the ones that present in both.
'''
df_common = pd.merge(
gwas[['snpid', 'chr', 'effect_allele', 'non_effect_allele']],
weight[['snpid', 'chr', 'effect_allele', 'non_effect_allele']],
on=['snpid', 'chr'],
suffixes=['_gwas', '_weight']
)
flip_factor = check_flip(
a1=df_common.effect_allele_gwas,
a2=df_common.non_effect_allele_gwas,
b1=df_common.effect_allele_weight,
b2=df_common.non_effect_allele_weight
)
# need to remove the invalid variant before moving on
to_keep_ind = np.logical_not(np.isnan(flip_factor))
df_common = df_common[ to_keep_ind ].reset_index(drop=True)
flip_factor = flip_factor[ to_keep_ind ]
df_common.drop(columns=['effect_allele_gwas', 'non_effect_allele_gwas'], inplace=True)
df_common.rename(columns={'effect_allele_weight': 'effect_allele', 'non_effect_allele_weight': 'non_effect_allele'}, inplace=True)
df_gwas = pd.merge(
df_common[['snpid', 'chr', 'effect_allele', 'non_effect_allele']],
gwas.drop(columns=['effect_allele', 'non_effect_allele']),
on=['snpid', 'chr']
)
df_gwas.effect_size = df_gwas.effect_size * flip_factor
df_weight = pd.merge(
df_common[['snpid', 'chr', 'effect_allele', 'non_effect_allele']],
weight.drop(columns=['effect_allele', 'non_effect_allele']),
on=['snpid', 'chr']
)
return df_gwas, df_weight
def _parse_args(args_list, desired_cols=None, no_raise=False):
fn = args_list[0]
if not pathlib.Path(fn).is_file():
raise ValueError('Filename is wrong. Cannot find the file.')
dict = {}
snpid_name = None
desired_cols_tmp = []
for i in args_list[1:]:
tmp = i.split(':')
if len(tmp) != 2:
raise ValueError('Wrong gwas args list. Need [col]:[name] pairs.')
col, name = tmp
if desired_cols is None:
desired_cols_tmp.append(col)
elif col not in desired_cols:
if no_raise is True:
continue
else:
raise ValueError(f'Wrong col = {col}.')
dict[col] = name
rename_dict = OrderedDict()
if desired_cols is None:
desired_cols = desired_cols_tmp
for dd in desired_cols:
if dd not in dict:
if no_raise is True:
continue
else:
raise ValueError(f'Need to have col = {dd}.')
rename_dict[dict[dd]] = dd
return fn, rename_dict
def _parse_gwas_args(args_list, mode='effect_size'):
if mode == 'effect_size':
have_effect_size = True
elif mode == 'zscore':
have_effect_size = False
else:
raise ValueError(f'Wrong loading mode for GWAS file: mode = {mode}')
# for kk in args_list:
# if 'effect_size:' in kk:
# have_effect_size = True
if have_effect_size is True:
desired_cols = [
'snpid', 'non_effect_allele', 'effect_allele',
'effect_size', 'effect_size_se', 'chr'
]
else:
desired_cols = [
'snpid', 'non_effect_allele', 'effect_allele',
'zscore', 'allele_frequency', 'sample_size', 'chr'
]
fn, rename_dict = _parse_args(args_list, desired_cols, no_raise=True)
for k, v in rename_dict.items():
if v == 'snpid':
snpid_name = k
break
return fn, rename_dict, snpid_name
def get_snpid_col(gwas_args_list):
for i in gwas_args_list:
if 'snpid:' in i:
_, tmp = i.split(':')
return tmp
def impute_b_from_z(zscore, af, n):
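    # Standard GWAS summary-statistic identity (comment added for clarity):
    # under Hardy-Weinberg equilibrium the genotype variance is 2*af*(1-af),
    # so for a roughly standardized trait se(bhat) ~ 1/sqrt(2*n*af*(1-af)),
    # and the effect size is recovered as bhat = zscore * se.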
se = 1 / np.sqrt(2 * n * af * (1 - af))
bhat = zscore * se
return bhat, se
def clean_up_chr(ll):
for i in range(len(ll)):
ll[i] = re.sub('chr', '', ll[i])
return ll
def get_key_by_val(val, dict_):
for i in dict_.keys():
if dict_[i] == val:
return i
return None
def load_gwas(gwas_args_list):
snpid_col = get_snpid_col(gwas_args_list[1:])
# fn = gwas_args_list[0]
fn, rename_dict = _parse_args(gwas_args_list, desired_cols=None)
df = read_table(fn, indiv_col=snpid_col)
k_effect_size = get_key_by_val('effect_size', rename_dict)
k_zscore = get_key_by_val('zscore', rename_dict)
if k_effect_size is not None and k_effect_size in df.columns:
_, rename_dict, snpid_col = _parse_gwas_args(gwas_args_list, mode='effect_size')
elif k_zscore is not None and k_zscore in df.columns:
_, rename_dict, snpid_col = _parse_gwas_args(gwas_args_list, mode='zscore')
else:
raise ValueError('We need either effect_size or zscore in GWAS file.')
df.rename(columns={'indiv': snpid_col}, inplace=True)
df.rename(columns=rename_dict, inplace=True)
df.drop_duplicates('snpid', inplace=True)
df.chr = clean_up_chr(list(df.chr.astype(str)))
if 'effect_size' not in rename_dict.values():
df['effect_size'], df['effect_size_se'] = impute_b_from_z(df.zscore, df.allele_frequency, df.sample_size)
# some qc on gwas
# remove se with 0 or inf
# remove effect size with na
df.effect_size_se.replace([0, np.inf, -np.inf], np.nan, inplace=True)
df = df[ (~ df.effect_size.isna()) & (~ df.effect_size_se.isna()) ].reset_index(drop=True)
desired_cols = [
'snpid', 'non_effect_allele', 'effect_allele',
'effect_size', 'effect_size_se', 'chr'
]
return df[desired_cols]
def _parse_idp_args(args_list):
desired_cols = [
'snpid', 'non_effect_allele', 'effect_allele', 'chr'
]
fn, rename_dict = _parse_args(args_list, desired_cols)
return fn, rename_dict
def load_idp(args_list):
fn, rename_dict = _parse_idp_args(args_list)
df = pd.read_parquet(fn)
df.rename(columns=rename_dict, inplace=True)
df.chr = df.chr.astype(str)
return df
def load_cov_meta(fn):
fn = '.'.join(fn.split('.')[:-1])
fn = fn + '.snp_meta.parquet'
return pd.read_parquet(fn)
def _to_list(var):
if not isinstance(var, list):
return [ var ]
else:
return var
def cleanup_idp_grp_dict(idp_grp_dict, idp_names):
'''
Check if keys and values in idp_grp_dict appear in idp_names.
If not, remove the key or value.
Return the cleaned up idp_grp_dict.
'''
to_drop = []
for k in idp_grp_dict.keys():
if 'covariates' not in idp_grp_dict[k] or 'x' not in idp_grp_dict[k]:
raise ValueError('For each entry, we require covariates and x.')
idp_grp_dict[k]['covariates'] = _to_list( idp_grp_dict[k]['covariates'] )
idp_grp_dict[k]['x'] = _to_list( idp_grp_dict[k]['x'] )
lc = intersection(idp_grp_dict[k]['covariates'], idp_names)
lx = intersection(idp_grp_dict[k]['x'], idp_names)
if len(lc) > 0 and len(lx) > 0:
idp_grp_dict[k]['covariates'] = lc
idp_grp_dict[k]['x'] = lx
else:
to_drop.append(k)
for k in to_drop:
del idp_grp_dict[k]
return idp_grp_dict
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(prog='run_simagexcan.py', description='''
Run S-ImageXcan with pre-computed genotype covariance.
Need to export PYTHONPATH=path-to/imagexcan:path-to/misc-tools/pyutil
''')
parser.add_argument('--genotype_covariance', help='''
The genotype covariance computed in build_genotype_covariance.py
Accept wildcard {chr_num}.
Will automatically search for the corresponding meta SNP file.
''')
parser.add_argument('--gwas', nargs='+', help='''
Need to have column names for:
snpid, non_effect_allele, effect_allele,
effect_size, effect_size_se, chr.
If there is no effect_size avaliable, it could
impute effect_size from zscore, allele_frequency,
sample_size.
The format is: snpid:rsid_col, ..., chr:chr
''')
parser.add_argument('--idp_weight', nargs='+', help='''
The IDP weight table is in parquet format.
It contains columns:
snpid, effect_allele, non_effect_allele, chr.
Along with all other columns for the IDPs.
Specify the column names, e.g.: snpid:rsID, ..., chr:chr
''')
parser.add_argument('--idp_yaml', default=None, help='''
A YAML file telling which PC is for which set of IDPs.
Example:
set1:
covariates:
- PC1
- PC2
x:
- IDP1
- IDP2
set2:
...
''')
parser.add_argument('--output', help='''
The output CSV filename.
Will return both marginal test result and also the susieR result.
''')
parser.add_argument('--z_ld_weight', type=float, default=1e-4, help='''
LD = (1 - z_ld_weight) * LD + z_ld_weight * (Z @ Z.T)
to avoid mis-specified LD.
''')
args = parser.parse_args()
from tqdm import tqdm
import logging, time, sys, os
# configing util
logging.basicConfig(
level = logging.INFO,
stream = sys.stderr,
format = '%(asctime)s %(message)s',
datefmt = '%Y-%m-%d %I:%M:%S %p'
)
from CovConstructor import CovMatrix
from susie_wrapper import run_susie_wrapper
from pystat import z2p
from pyutil import read_yaml
logging.info('Loading GWAS.')
df_gwas = load_gwas(args.gwas)
# df_gwas columns:
# snpid, non_effect_allele, effect_allele,
# effect_size, effect_size_se, chr
logging.info('GWAS SNP = {}'.format(df_gwas.shape[0]))
logging.info('Loading IDP weights.')
df_weight = load_idp(args.idp_weight)
idp_names = list(df_weight.columns[4:])
nidp = len(idp_names)
logging.info('IDP SNP = {} and number of IDPs = {}'.format(df_weight.shape[0], nidp))
logging.info('Harmonizing GWAS and IDP weights.')
# harmonize GWAS and IDP weight table so that they have the same set of
# SNPs (including direction).
df_gwas, df_weight = harmonize_gwas_and_weight(df_gwas, df_weight)
logging.info('{} SNPs left after harmonizing GWAS and IDP weights.'.format(df_gwas.shape[0]))
if args.idp_yaml is not None:
logging.info('Loading IDP YAML.')
idp_grp_dict = read_yaml(args.idp_yaml)
idp_grp_dict = cleanup_idp_grp_dict(idp_grp_dict, idp_names)
logging.info('There are {} IDP sets'.format(len(idp_grp_dict.keys())))
else:
idp_grp_dict = None
# please refer to https://github.com/hakyimlab/yanyu-notebook/blob/master/notes/date_112420.Rmd
# for the details of the S-ImageXcan formula
# to take the following procedure.
# 0. subset IDP and GWAS SNPs.
# 1. Per chromosome
# 1.1 obtain D(chr), S_R(chr), and var_R(chr).
# 1.2 compute numer_b(chr) = Gamma(chr).T @ (var_R(chr) * b_gwas(chr))
# 1.3 compute numer_z(chr) = Gamma(chr).T @ (S_R(chr) * z_gwas(chr))
# 2. compute marginal test.
# 2.1 D = sum_chr D(chr), var_D = diag(D), S_D = sqrt(var_D)
# 2.2 beta_imagexcan = ( sum_chr numer_b(chr) ) / var_D
# 2.3 z_imagexcan = ( sum_chr numer_z(chr) ) / S_D
# 3. run susieR.
# 3.1 Sigma = D / S_D[:, np.newaxis] / S_D[np.newaxis, :]
# also, we do an extension of the marginal test where we account for PCs when testing one IDP at a time.
# for the details of the formula see:
# https://github.com/hakyimlab/yanyu-notebook/blob/master/notes/date_041421.Rmd
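    # Notation reading aid (inferred from the code, not in the original):
    # Gamma(chr) is the SNP-by-IDP weight matrix, var_R / S_R the per-SNP
    # genotype variances and standard deviations, and D = Gamma' R Gamma the
    # IDP-level covariance implied by the genotype covariance R.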
D = np.zeros((nidp, nidp))
numer_b = np.zeros((nidp))
numer_z = np.zeros((nidp))
for i in range(1, 23):
df_gwas_sub = df_gwas[ df_gwas.chr == str(i) ].reset_index(drop=True)
df_weight_sub = df_weight[ df_weight.chr == str(i) ].reset_index(drop=True)
if df_gwas_sub.shape[0] == 0:
continue
logging.info(f'Chromosome {i}: Loading genotype covariance meta information.')
df_cov_meta = load_cov_meta(args.genotype_covariance.format(chr_num=i))
# step0
n0 = df_weight_sub.shape[0] # for book keeping
# we enforce the GWAS table and the IDP weights to have
# the same SNPs as genotype covariance
# the weights of the missing ones are set to NaN.
df_gwas_sub = rearrage_df_by_target(
df=df_gwas_sub,
target=df_cov_meta,
df_value_cols=['effect_size']
)
df_weight_sub = rearrage_df_by_target(
df=df_weight_sub,
target=df_cov_meta,
df_value_cols=list(df_weight.columns[4:])
)
n1 = df_gwas_sub.effect_size.notna().sum()
logging.info('Step0 Chromosome {}: {} out of {} SNPs in IDP/GWAS are used.'.format(i, n1, n0))
logging.info(f'Step1 Chromosome {i}: Working with genotype covariance.')
weight = df_weight_sub.iloc[:, 4 : ].to_numpy(copy=True)
weight[np.isnan(weight)] = 0
b_gwas = df_gwas_sub.effect_size.to_numpy(copy=True)
b_gwas[np.isnan(b_gwas)] = 0
se_gwas = df_gwas_sub.effect_size_se.to_numpy(copy=True)
        se_gwas[np.isnan(se_gwas)]
# -*- coding: utf-8 -*-
"""
Nutrislice Extractor
Created on Mon Oct 28 08:32:54 2019
@author: carverjc
This software is used to extract menu items from Nutrislice Menus. See README.md for details on usage.
"""
import cv2, sys
import pytesseract
import numpy as np
import os
import pandas as pd
import skimage
import time
from skimage import io
import copy
from matplotlib import pyplot as plt
from os.path import join
from os import makedirs
from glob import glob
# MUST CHANGE THESE TWO
os.environ["TESSDATA_PREFIX"] = "PATH_TO_TESSDATA"
pytesseract.pytesseract.tesseract_cmd = "PATH_TO_tesseract.exe"
def find_lines(img, length_of_run=20, distance=100):
    # derive the dimensions from the image itself (the original relied on
    # module-level globals that were never defined)
    IMG_HEIGHT, IMG_WIDTH = img.shape[:2]
    runs = [(-1)*(distance + 1)]
    for i in range(IMG_WIDTH):
for j in range(IMG_HEIGHT):
run_length = 0
if np.all(img[j,i] == 0.0) and i - runs[-1] > distance:
for run in range(length_of_run):
try:
if np.all(img[j + run, i] == 0.0):
run_length += 1
except IndexError:
break
if run_length == length_of_run:
runs.append(i)
break
return runs[1:] #list(dict.fromkeys(runs))
def greatest_line(img):
    IMG_HEIGHT, IMG_WIDTH = img.shape[:2]
max_list = []
for i in range(IMG_WIDTH):
total_col_max = 0
for j in range(IMG_HEIGHT):
max_run = 0
if np.all(img[j,i] == 0.0):
new_index = j
try:
while np.all(img[new_index,i] == 0.0):
max_run += 1
new_index += 1
except IndexError:
continue
if max_run > total_col_max:
total_col_max = max_run
max_list.append(total_col_max)
return max_list
def calculate_pixels(img, find_row=True, derivative=False):
    # derive the dimensions from the image itself (the original relied on
    # module-level globals that were never defined)
    IMG_HEIGHT, IMG_WIDTH = img.shape[:2]
    row_mean = []
if find_row == True:
for i in range(IMG_HEIGHT):
intermediate_sum = 0
for j in range(IMG_WIDTH):
intermediate_sum = intermediate_sum + img[i,j][0]
row_mean.append(intermediate_sum / IMG_WIDTH)
else:
for i in range(IMG_WIDTH):
intermediate_sum = 0
for j in range(IMG_HEIGHT):
intermediate_sum = intermediate_sum + img[j,i][0]
row_mean.append(intermediate_sum / IMG_HEIGHT)
if derivative == True:
for i in range(len(row_mean) - 1):
row_mean[i] = row_mean[i + 1] - row_mean[i]
row_mean = row_mean[:-1]
return row_mean
def plot_df (df, title="", xlabel='Pixel Index', ylabel='Pixel Value', dpi=100):
df = pd.DataFrame(df)
df.index.name = xlabel
df.reset_index(inplace=True)
df.columns = [xlabel, ylabel]
plt.figure(figsize=(16,5), dpi=dpi)
    plt.plot(df[xlabel], df[ylabel], color='tab:red')
    # use the caller-supplied title; the old hard-coded plt.title() call
    # silently overrode the `title` argument
    plt.gca().set(title=title, xlabel=xlabel, ylabel=ylabel)
    plt.show()
def cut_finder (df, max_pixel, distance, find_black = True):
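    # Scan a 1-D pixel-value profile and return indices where it drops below
    # max_pixel, keeping cuts at least `distance` apart; with find_black=False
    # each cut is refined to the local maximum between consecutive dips.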
cuts = []
cuts = [(-1)*distance]
for i in range(len(df)):
if find_black:
if df[i] < max_pixel and (i - cuts[-1]) > distance:
cuts.append(i)
else:
if df[i] < max_pixel and (i - cuts[-1]) > distance:
if len(cuts) == 1:
cuts.append(i - 20)
elif len(cuts) > 1:
if len(cuts) > 2:
cuts.remove(cuts[-1])
intermediate_cut = []
cuts.append(i)
intermediate_cut = copy.copy(df[cuts[-2]:cuts[-1]])
cuts[-1] = cuts[-2] + intermediate_cut.index(max(intermediate_cut))
cuts.append(i)
else:
continue
return list(dict.fromkeys(cuts[1:]))
def findnth (haystack, needle, n):
parts= haystack.split(needle, n+1)
if len(parts)<=n+1:
return -1
return len(haystack)-len(parts[-1])-len(needle)
def isNaN(num):
return num != num
def ocr (image_file):
image_file0 = image_file[:-4]
os.chdir(pathog)
image = cv2.imread(image_file0 + '.jpg')
os.chdir(pathnew)
in_dir = (pathnew)
config = '--oem 1 --psm 10 -c tessedit_char_whitelist=0123456789'
configLetters = '--oem 1 --psm 3 tessedit_char_whitelist=abcdefghijklmnopqrstuvwxyz'
OCR = pytesseract.image_to_string(image, lang='eng', config = configLetters)
matchers_month = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September',
'October', 'November', 'December']
matchers_year = ['2017', '2018', '2019']
OCR = OCR.replace('\n', ' ')
OCR = OCR.replace('/', ' ')
OCR1 = OCR.split(' ')
try:
matching_month = [s for s in OCR1 if any(xs in s for xs in matchers_month)][0]
except IndexError:
matching_month = ("October")
try:
matching_year = [s for s in OCR1 if any(xs in s for xs in matchers_year)][0]
except IndexError:
matching_year = ('2017')
file_name_string = image_file0.split('_')
state = pathog.split('/')[-1]
if 'district' in image_file:
index = file_name_string.index('district')
county = ' '.join(file_name_string[:(index + 1)])
elif 'county' in image_file:
index = file_name_string.index('county')
county = ' '.join(file_name_string[:(index + 1)])
elif 'city' in image_file:
index = file_name_string.index('city')
county = ' '.join(file_name_string[:(index + 1)])
elif 'borough' in image_file:
index = file_name_string.index('borough')
county = ' '.join(file_name_string[:(index + 1)])
elif 'County' in image_file:
index = file_name_string.index('County')
county = ' '.join(file_name_string[:(index + 1)])
elif 'City' in image_file:
index = file_name_string.index('City')
county = ' '.join(file_name_string[:(index + 1)])
elif 'Borough' in image_file:
index = file_name_string.index('Borough')
county = ' '.join(file_name_string[:(index + 1)])
elif 'District' in image_file:
index = file_name_string.index('District')
county = ' '.join(file_name_string[:(index + 1)])
elif 'DISTRICT' in image_file:
index = file_name_string.index('DISTRICT')
county = ' '.join(file_name_string[:(index + 1)])
elif 'menu' in image_file:
county = ' '.join(file_name_string[:2])
elif matching_year in image_file:
index = file_name_string.index(matching_year)
county = ' '.join(file_name_string[:index])
else:
county = file_name_string[0]
if 'lunch' in OCR:
meal = 'lunch'
elif 'breakfast' in OCR:
meal = 'breakfast'
else:
meal = "lunch"
preface = (state + ';' + county + ';' + matching_year + ';' + matching_month + ';')
filename = (in_dir + image_file[:-13] + '.txt')
headers = ('State;County;Year;Month;Date;Type;Item;Sodium\n')
totalfile = open(filename, "w+")
totalfile.write(headers)
totalfile.close()
number_crop = 40
for image_file2 in glob(f'*.jpg'):
image = cv2.imread(image_file2)
thresh, image = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)
image2 = 255*(image < 128).astype(np.uint8)
image2_final = image2[:number_crop,:]
image_final = image[number_crop:,:]
# See if there is anything
OCR = pytesseract.image_to_string(image_final, lang='eng', config = configLetters)
if len(OCR) < 10:
print("No data: skipped")
continue
OCR = ''
length_of_run = max(greatest_line(image)) - 5
image2_final = image2[:length_of_run,:]
image_final = image[length_of_run:,:]
OCR = pytesseract.image_to_string(image2_final, lang='eng', config = config)
date = copy.copy(OCR)
preface_interm = (preface + date + ';' + meal + ';')
OCR = pytesseract.image_to_string(image_final, lang='eng', config = configLetters)
if "Sodium" in OCR:
            OCR = OCR.replace('\n(', '(')
OCR = OCR.split('\n')
OCR_new = []
for i in range(len(OCR)):
#if 'mg' in OCR[i] and len(OCR[i]) > 7:
if len(OCR[i]) > 7:
OCR_new.append(OCR[i])
for i in range(len(OCR_new)):
if 'mg' in OCR_new[i]:
OCR_new[i] = OCR_new[i].replace('(', ';')
OCR_new[i] = OCR_new[i].replace('mg', '')
else:
OCR_new[i] = OCR_new[i] + ';'
OCR_new = '\n'.join(OCR_new)
OCR_new += '\n'
OCR = OCR_new
else:
OCR = OCR.replace('\n\n','\n')
OCR += '\n'
OCR = OCR.replace('\n',';\n')
OCR = OCR.replace('Sodium','')
OCR = OCR.replace(')','')
OCR = OCR.replace('}','')
OCR = OCR.replace(']','')
OCR = '\n' + OCR
OCR = OCR.replace('\n', '\n' + preface_interm)
OCR = OCR[:OCR.rfind(state)]
OCR = OCR.replace('+ ','')
OCR = OCR.replace('« ','')
OCR = OCR.replace('* ','')
OCR = OCR.replace('» ','')
OCR = OCR.replace('+','')
OCR = OCR.replace('«','')
OCR = OCR.replace('*','')
OCR = OCR.replace('»','')
OCR = OCR.replace('é','')
OCR = OCR.replace('©','')
OCR = OCR[1:]
test = OCR.split('\n')
for line in range(len(test)):
if test[line].count(';') > 7:
cutindex = findnth(test[line], ';', 7)
test.insert(line + 1, preface_interm + test[line][cutindex + 1:])
test[line] = test[line][:cutindex]
OCR = '\n'.join(test)
OCR = OCR.encode('utf-8').strip()
OCR += (b'\n')
        myfile = open(in_dir + '{}.txt'.format(image_file2[:-4]), "w+")
        myfile.write(OCR.decode('utf-8'))  # OCR is bytes here; decode before writing to a text file
        myfile.close()
totalfile2 = open(filename, "ab+")
totalfile2.write(OCR)
totalfile2.close()
txt_file = filename
csv_file = (filename[:-4] + '.csv')
dataframe = pd.read_csv(txt_file, delimiter=";")
try:
        for rows in range(dataframe.shape[0]):
            if (not isNaN(dataframe['Item'][rows])) and dataframe['Item'][rows][0] == '-':
                dataframe.loc[rows, 'Item'] = dataframe['Item'][rows][1:]  # .loc avoids chained-assignment pitfalls
try:
dataframe['Sodium'] = dataframe['Sodium'].replace({'o': '0'}, regex=True)
dataframe['Sodium'] = dataframe['Sodium'].replace({'O': '0'}, regex=True)
dataframe['Sodium'] = dataframe['Sodium'].replace({'S': ''}, regex=True)
dataframe['Sodium'] = dataframe['Sodium'].replace({'f': '1'}, regex=True)
dataframe['Sodium'] = dataframe['Sodium'].replace({'wi': ''}, regex=True)
dataframe['Sodium'] = dataframe['Sodium'].replace({'%': '1'}, regex=True)
except TypeError:
print("Non Sodium File")
#Creates the 'entree' variable using a ML method
try:
dataframe.to_csv(csv_file, encoding='utf-8', index=False)
except UnicodeDecodeError:
try:
dataframe.to_csv(csv_file, index=False)
except UnicodeDecodeError:
print("Couldn't Create CSV")
except UnicodeDecodeError:
print("Couldn't Create CSV")
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
real_og_path = os.getcwd()
path = (os.getcwd())
os.chdir(os.getcwd() + '\\images\\')
for state in glob(os.getcwd() + '\\**'):
pathog = '/'.join(state.split('\\'))
os.chdir(pathog)
print(pathog)
try:
os.mkdir(path + '\\2017\\' + pathog.split('/')[-1])
except FileExistsError:
print("State Folder Exists")
for image_file in glob(f'*.jpg'):
os.chdir(pathog)
print(image_file)
print("Splitting Rows")
        rows_path = (real_og_path + '\\2017\\' + pathog.split('/')[-1] + '\\' + image_file[:-4] + '\\rows\\')  # build from the saved root so `path` does not grow on every iteration
MISSING_LAST_ROW = True
gray = cv2.imread(image_file)
gray2 = copy.copy(gray)
IMG_WIDTH = gray.shape[:2][1]
IMG_HEIGHT = gray.shape[:2][0]
for i in range(IMG_WIDTH):
for j in range(IMG_HEIGHT):
if not(np.all(gray[j,i] < 1.0)):
                    gray2[j,i] = np.array([255,255,255])
from matplotlib import pyplot as plt
import numpy as np
import spikeforest
class CrossCorrelogramsWidget:
def __init__(self, max_samples=None, auto=True, *, sorting, samplerate, unit_ids=None, _figure=None, _axs=None):
self._SX = sorting
self._unit_ids = unit_ids
self._figure = _figure
self._axs = _axs
        if self._figure is not None:
            self._axs = self._figure.axes
self._samplerate = samplerate
self.auto = auto
self.max_samples = max_samples
self.max_dt_msec = 50
self.bin_size_msec = 2
self.max_dt_tp = self.max_dt_msec * self._samplerate / 1000
self.bin_size_tp = self.bin_size_msec * self._samplerate / 1000
def plot(self):
if self.auto:
self._do_plot()
else:
self._do_plot_matrix()
def figure(self):
return self._figure
def _do_plot_matrix(self):
units = self._unit_ids
if units is None:
units = self._SX.get_unit_ids()
nrows = ncols = len(units)
f, axs = plt.subplots(nrows, ncols, figsize=(3 * ncols + 0.1, 3 * nrows + 0.1))
self._figure = f
for i1, unit1 in enumerate(units):
times1 = self._SX.get_unit_spike_train(unit_id=unit1)
for i2, unit2 in enumerate(units):
times2 = self._SX.get_unit_spike_train(unit_id=unit2)
if i1 == i2:
(bin_counts, bin_edges) = compute_crosscorrelogram(times1, max_dt_tp=self.max_dt_tp, bin_size_tp=self.bin_size_tp, max_samples=self.max_samples)
else:
(bin_counts, bin_edges) = compute_crosscorrelogram(times1, times2, max_dt_tp=self.max_dt_tp, bin_size_tp=self.bin_size_tp, max_samples=self.max_samples)
item = dict(
title="{} -> {}".format(unit1, unit2),
bin_counts=bin_counts,
bin_edges=bin_edges
)
self._plot_correlogram(axs[i1, i2], **item)
def _do_plot(self):
units = self._unit_ids
if units is None:
units = self._SX.get_unit_ids()
        items = []
        for unit in units:
            times = self._SX.get_unit_spike_train(unit_id=unit)
            (bin_counts, bin_edges) = compute_autocorrelogram(times, max_dt_tp=self.max_dt_tp, bin_size_tp=self.bin_size_tp)
            item = dict(
                title=str(unit),
                bin_counts=bin_counts,
                bin_edges=bin_edges
            )
            items.append(item)
        with plt.rc_context({'axes.edgecolor': 'gray'}):
            self._plot_correlograms_multi(items)
def _plot_correlogram(self, ax, *, bin_counts, bin_edges, title=''):
wid = (bin_edges[1] - bin_edges[0]) * 1000
ax.bar(x=bin_edges[0:-1] * 1000, height=bin_counts, width=wid, color='gray', align='edge')
ax.set_xlabel('dt (msec)')
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title, color='gray')
    def _plot_correlograms_multi(self, items, *, ncols=5, **kwargs):
        nrows = int(np.ceil(len(items) / ncols))
        if (self._figure is None) and (self._axs is None):
            f, axs = plt.subplots(nrows, ncols, figsize=(3 * ncols + 0.1, 3 * nrows + 0.1))
            self._figure = f
            self._axs = axs  # assign here; `axs` is undefined when a figure was passed in
        for i, item in enumerate(items):
            ax = plt.subplot(nrows, ncols, i + 1)
            self._plot_correlogram(ax, **item, **kwargs)
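# Example usage (hedged sketch; `sorting` is assumed to be a SortingExtractor
# exposing get_unit_ids()/get_unit_spike_train(), and 30000 is an illustrative
# sampling rate, not a value from this file):
#   w = CrossCorrelogramsWidget(sorting=sorting, samplerate=30000, auto=False)
#   w.plot()            # auto=False -> full cross-correlogram matrix
#   fig = w.figure()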
def compute_crosscorrelogram(x, y=None, *, max_dt_tp, bin_size_tp, max_samples=None):
if y is None:
y = x
auto = True
else:
auto = False
if max_samples is not None:
if max_samples < len(x):
x = np.random.choice(x, size=max_samples, replace=False)
if max_samples < len(y):
y = np.random.choice(y, size=max_samples, replace=False)
bin_start = -max_dt_tp
bin_stop = max_dt_tp
bin_edges = np.arange(start=bin_start, stop=bin_stop + bin_size_tp, step=bin_size_tp)
counts = np.zeros(len(bin_edges) - 1)
nbins = len(counts)
    x = np.sort(x)
    y = np.sort(y)
    # The original implementation is truncated at this point. The sliding-
    # window loop below is a reconstruction of the standard correlogram
    # count, not the verbatim original.
    j_start = 0
    for t in x:
        while j_start < len(y) and y[j_start] < t - max_dt_tp:
            j_start += 1
        j = j_start
        while j < len(y) and y[j] <= t + max_dt_tp:
            dt = y[j] - t
            if not (auto and dt == 0):  # skip zero-lag self-pairs for autocorrelograms
                ibin = int(np.floor((dt - bin_start) / bin_size_tp))
                if 0 <= ibin < nbins:
                    counts[ibin] += 1
            j += 1
    return (counts, bin_edges)
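def compute_autocorrelogram(x, *, max_dt_tp, bin_size_tp, max_samples=None):
    # Referenced by CrossCorrelogramsWidget._do_plot but not defined in this
    # file; assumed here to be a thin wrapper around compute_crosscorrelogram
    # with y=None (a hedged reconstruction, not the original definition).
    return compute_crosscorrelogram(x, None, max_dt_tp=max_dt_tp,
                                    bin_size_tp=bin_size_tp,
                                    max_samples=max_samples)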
# encoding: utf-8
#
# @Author: <NAME>, <NAME>
# @Date: Nov 15, 2021
# @Filename: ism.py
# @License: BSD 3-Clause
# @Copyright: <NAME>, <NAME>
import os.path
from astropy import units as u
from astropy import constants as c
import numpy as np
from astropy.io import fits, ascii
from astropy.table import Table
from scipy.special import sph_harm
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_scales
from astropy.coordinates import SkyCoord
from astropy.modeling.models import Sersic2D
from dataclasses import dataclass
import sys
if sys.version_info < (3, 8):
from backports.cached_property import cached_property
else:
from functools import cached_property
from scipy.ndimage import map_coordinates
from scipy.interpolate import interp1d, interp2d
import lvmdatasimulator
from lvmdatasimulator import log
import progressbar
from joblib import Parallel, delayed
from astropy.convolution import convolve_fft, kernels
from lvmdatasimulator.utils import calc_circular_mask, convolve_array, set_default_dict_values, \
ism_extinction, check_overlap, assign_units
fluxunit = u.erg / (u.cm ** 2 * u.s * u.arcsec ** 2)
velunit = u.km / u.s
def brightness_inhomogeneities_sphere(harm_amplitudes, ll, phi_cur, theta_cur, rho, med, radius, thickness):
"""
Auxiliary function producing the inhomogeneities on the brightness distribution for the Cloud of Bubble objects
using the spherical harmonics.
"""
brt = theta_cur * 0
for m in np.arange(-ll, ll + 1):
brt += (harm_amplitudes[m + ll * (ll + 1) - 1] * sph_harm(m, ll, phi_cur, theta_cur).real * med *
(1 - np.sqrt(abs(rho.value ** 2 / radius.value ** 2 - (1 - thickness / 2) ** 2))))
return brt
def sphere_brt_in_line(brt_3d, rad_3d, rad_model, flux_model):
"""
Auxiliary function computing the brightness of the Cloud or Bubble at given radii and in given line
according to the Cloudy models
"""
p = interp1d(rad_model, flux_model, fill_value='extrapolate', assume_sorted=True)
return p(rad_3d) * brt_3d
def interpolate_sphere_to_cartesian(spherical_array, x_grid=None, y_grid=None, z_grid=None,
rad_grid=None, theta_grid=None, phi_grid=None, pxscale=1. * u.pc):
"""
Auxiliary function to project the brightness or velocities from the spherical to cartesian coordinates
"""
x, y, z = np.meshgrid(x_grid, y_grid, z_grid, indexing='ij')
phi_c, theta_c, rad_c = xyz_to_sphere(x, y, z, pxscale=pxscale)
ir = interp1d(rad_grid, np.arange(len(rad_grid)), bounds_error=False)
ith = interp1d(theta_grid, np.arange(len(theta_grid)))
iphi = interp1d(phi_grid, np.arange(len(phi_grid)))
new_ir = ir(rad_c.ravel())
new_ith = ith(theta_c.ravel())
new_iphi = iphi(phi_c.ravel())
cart_data = map_coordinates(spherical_array, np.vstack([new_ir, new_ith, new_iphi]),
order=1, mode='constant', cval=0)
return cart_data.reshape([len(x_grid), len(y_grid), len(z_grid)]).T
def limit_angle(value, bottom_limit=0, top_limit=np.pi):
"""
Auxiliary function to limit the angle values to the range of [0, pi]
"""
value[value < bottom_limit] += (top_limit - bottom_limit)
value[value > top_limit] -= (top_limit - bottom_limit)
return value
def xyz_to_sphere(x, y, z, pxscale=1. * u.pc):
"""
Auxiliary function to map the coordinates from cartesian to spherical system
"""
phi_c = np.arctan2(y, x)
rad_c = (np.sqrt(x ** 2 + y ** 2 + z ** 2))
rad_c[rad_c == 0 * u.pc] = 1e-3 * pxscale
theta_c = (np.arccos(z / rad_c))
phi_c = limit_angle(phi_c, 0 * u.radian, 2 * np.pi * u.radian)
theta_c = limit_angle(theta_c, 0 * u.radian, np.pi * u.radian)
return phi_c, theta_c, rad_c
def find_model_id(file=lvmdatasimulator.CLOUDY_MODELS,
check_id=None, params=lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']):
"""
Checks the input parameters of the pre-computed Cloudy model and return corresponding index in the grid
"""
with fits.open(file) as hdu:
if check_id is None:
if params is None:
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
log.warning(f'Default Cloudy model will be used (id = {check_id})')
else:
summary_table = Table(hdu['Summary'].data)
indexes = np.arange(len(summary_table)).astype(int)
rec_table = np.ones(shape=len(summary_table), dtype=bool)
def closest(rec, prop, val):
unique_col = np.unique(summary_table[prop][rec])
if isinstance(val, str):
res = unique_col[unique_col == val]
if len(res) == 0:
return ""
return res
else:
return unique_col[np.argsort(np.abs(unique_col - val))[0]]
for p in params:
if p not in summary_table.colnames or params[p] is None or \
((isinstance(params[p], float) or isinstance(params[p], int)) and ~np.isfinite(params[p])):
continue
rec_table = rec_table & (summary_table[p] == closest(indexes, p, params[p]))
indexes = np.flatnonzero(rec_table)
if len(indexes) == 0:
break
if len(indexes) == 0 or len(indexes) == len(summary_table):
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
log.warning('Input parameters do not correspond to any pre-computed Cloudy model.'
'Default Cloudy model will be used (id = {0})'.format(check_id))
elif len(indexes) == 1:
check_id = summary_table['Model_ID'][indexes[0]]
for p in params:
if p not in summary_table.colnames or params[p] is None or \
((isinstance(params[p], float) or
isinstance(params[p], int)) and ~np.isfinite(params[p])):
continue
if params[p] != summary_table[p][indexes[0]]:
log.warning(f'Use the closest pre-computed Cloudy model with id = {check_id}')
break
else:
check_id = summary_table['Model_ID'][indexes[0]]
log.warning(f'Select one of the closest pre-computed Cloudy model with id = {check_id}')
#
# for cur_ext in range(len(hdu)):
# if cur_ext == 0:
# continue
# found = False
# for p in params:
# if p == 'id':
# continue
# precision = 1
# if p == 'Z':
# precision = 2
# if np.round(params[p], precision) != np.round(hdu[cur_ext].header[p], precision):
# break
# else:
# found = True
# if found:
# return cur_ext, check_id
# check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
# log.warning('Input parameters do not correspond to any pre-computed Cloudy model.'
# 'Default Cloudy model will be used (id = {0})'.format(check_id))
extension_index = None
while extension_index is None:
extension_index = [cur_ext for cur_ext in range(len(hdu)) if (
check_id == hdu[cur_ext].header.get('MODEL_ID'))]
if len(extension_index) == 0:
if check_id == lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']:
log.warning('Model_ID = {0} is not found in the Cloudy models grid. '
'Use the first one in the grid instead'.format(check_id))
extension_index = 1
else:
log.warning('Model_ID = {0} is not found in the Cloudy models grid. '
'Use default ({1}) instead'.format(check_id,
lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']))
check_id = lvmdatasimulator.CLOUDY_SPEC_DEFAULTS['id']
extension_index = None
else:
extension_index = extension_index[0]
return extension_index, check_id
@dataclass
class Nebula:
"""
Base class defining properties of every nebula type.
By itself it describes the rectangular nebula (e.g. DIG)
Constructed nebula has 4 dimensions, where 4th derive its appearance in different lines
(if spectrum_id is None, or if it is dark nebula => only one line)
"""
xc: int = None # Center of the region in the field of view, pix
yc: int = None # Center of the region in the field of view, pix
x0: int = 0 # Coordinates of the bottom-left corner in the field of view, pix
y0: int = 0 # Coordinates of the bottom-left corner in the field of view, pix
pix_width: int = None # full width of cartesian grid, pix (should be odd)
pix_height: int = None # full height of cartesian grid, pix (should be odd)
width: u.pc = 0 * u.pc # width of the nebula in pc (not used if pix_width is set up)
height: u.pc = 0 * u.pc # height of the nebula in pc (not used if pix_height is set up)
pxscale: u.pc = 0.01 * u.pc # pixel size in pc
spectrum_id: int = None # ID of a template Cloudy emission spectrum for this nebula
n_brightest_lines: int = None # limit the number of the lines to the first N brightest
sys_velocity: velunit = 0 * velunit # Systemic velocity
turbulent_sigma: velunit = 10 * velunit # Velocity dispersion due to turbulence; included in calculations of LSF
max_brightness: fluxunit = 1e-15 * fluxunit
max_extinction: u.mag = 0 * u.mag
perturb_scale: int = 0 * u.pc # Spatial scale of correlated perturbations
perturb_amplitude: float = 0.1 # Maximal amplitude of perturbations
_npix_los: int = 1 # full size along line of sight in pixels
nchunks: int = -1 # number of chuncks to use for the convolution. If negative, select automatically
vel_gradient: (velunit / u.pc) = 0 # velocity gradient along the nebula
vel_pa: u.degree = 0 # Position angle of the kinematical axis (for the velocity gradient or rotation velocity)
def __post_init__(self):
self._assign_all_units()
self._assign_position_params()
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
def _assign_all_units(self):
whole_list_properties = ['pxscale', 'sys_velocity', 'turbulent_sigma', 'max_brightness', 'max_extinction',
'perturb_scale', 'radius', 'PA', 'length', 'width', 'vel_gradient', 'r_eff',
'vel_rot', 'expansion_velocity', 'spectral_axis', 'vel_pa']
whole_list_units = [u.pc, velunit, velunit, fluxunit, u.mag, u.pc, u.pc, u.degree, u.pc, u.pc,
(velunit / u.pc), u.kpc, velunit, velunit, velunit, u.degree]
cur_list_properties = []
cur_list_units = []
for prp, unit in zip(whole_list_properties, whole_list_units):
if hasattr(self, prp):
cur_list_properties.append(prp)
cur_list_units.append(unit)
assign_units(self, cur_list_properties, cur_list_units)
def _assign_position_params(self, conversion_type='rect'):
if conversion_type == 'rect':
for v in ['height', 'width']:
if self.__getattribute__(f'pix_{v}') is None:
val = np.round((self.__getattribute__(v) / self.pxscale).value / 2.).astype(int) * 2 + 1
else:
val = np.round(self.__getattribute__(f'pix_{v}') / 2.).astype(int) * 2 + 1
setattr(self, f'pix_{v}', val)
elif conversion_type == 'ellipse':
self.pix_width = (np.round(np.abs(self.radius / self.pxscale * np.sin(self.PA)) +
np.abs(self.radius / self.pxscale *
self.ax_ratio * np.cos(self.PA))).astype(int) * 2 + 1).value
self.pix_height = (np.round(np.abs(self.radius / self.pxscale * np.cos(self.PA)) +
np.abs(self.radius / self.pxscale *
self.ax_ratio * np.sin(self.PA))).astype(int) * 2 + 1).value
elif conversion_type == 'galaxy':
self.pix_width = (np.round(np.abs(self.r_max * np.sin(self.PA)) +
np.abs(self.r_max * self.ax_ratio * np.cos(self.PA))).astype(int) * 2 + 1).value
self.pix_height = (np.round(np.abs(self.r_max * np.cos(self.PA)) +
np.abs(self.r_max * self.ax_ratio * np.sin(self.PA))).astype(int) * 2 + 1).value
elif conversion_type == 'cylinder':
self.pix_width = (np.ceil((self.length * np.abs(np.sin(self.PA)) +
self.width * np.abs(np.cos(self.PA))) / self.pxscale / 2.
).astype(int) * 2 + 1).value
self.pix_height = (np.ceil((self.length * np.abs(np.cos(self.PA)) +
self.width * np.abs(np.sin(self.PA))) / self.pxscale / 2.
).astype(int) * 2 + 1).value
if (self.xc is not None) and (self.yc is not None):
self.x0 = self.xc - np.round((self.pix_width - 1) / 2).astype(int)
self.y0 = self.yc - np.round((self.pix_height - 1) / 2).astype(int)
elif (self.x0 is not None) and (self.y0 is not None):
self.xc = self.x0 + np.round((self.pix_width - 1) / 2).astype(int)
self.yc = self.y0 + np.round((self.pix_height - 1) / 2).astype(int)
@cached_property
def _cartesian_x_grid(self):
return np.arange(self.pix_width) * self.pxscale
@cached_property
def _cartesian_y_grid(self):
return np.arange(self.pix_height) * self.pxscale
@cached_property
def _cartesian_z_grid(self):
return np.arange(self._npix_los) * self.pxscale
@cached_property
def _max_density(self):
return self.max_extinction * (1.8e21 / (u.cm ** 2 * u.mag))
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
brt = np.ones(shape=(self.pix_height, self.pix_width, self._npix_los), dtype=float) / self._npix_los
if (self.perturb_scale > 0) and (self.perturb_amplitude > 0):
pertscale = (self.perturb_scale / self.pxscale).value
perturb = np.random.uniform(-1, 1, (self.pix_height, self.pix_width)
) * self.perturb_amplitude / self._npix_los
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
f = np.exp(-2 * (xx ** 2 + yy ** 2) / pertscale)
perturb = 4 / np.sqrt(np.pi) / pertscale * np.fft.ifft2(np.fft.fft2(perturb) * np.fft.fft2(f)).real
brt += (perturb[:, :, None] - np.median(perturb))
return brt
@cached_property
def _brightness_4d_cartesian(self):
"""
Derive the brightness (or density) distribution of the nebula for each emission line in cartesian coordinates
"""
if self.spectrum_id is None or self.linerat_constant:
flux_ratios = np.array([1.])
else:
with fits.open(lvmdatasimulator.CLOUDY_MODELS) as hdu:
flux_ratios = hdu[self.spectrum_id].data[1:, 1]
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0] == 6562.81)
if self.n_brightest_lines is not None and \
(self.n_brightest_lines > 0) and (self.n_brightest_lines < len(flux_ratios)):
indexes_sorted = np.argsort(flux_ratios)[::-1]
flux_ratios = flux_ratios[indexes_sorted[: self.n_brightest_lines]]
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0][indexes_sorted] == 6562.81)
if len(index_ha) == 1:
self._ref_line_id = index_ha[0]
return self._brightness_3d_cartesian[None, :, :, :] * flux_ratios[:, None, None, None]
@cached_property
def brightness_skyplane(self):
"""
Project the 3D nebula onto sky plane (for emission or continuum sources)
"""
if self.max_brightness > 0:
norm_max = self.max_brightness
else:
norm_max = 1
map2d = np.nansum(self._brightness_3d_cartesian, 2)
return map2d / np.nanmax(map2d) * norm_max
@cached_property
def brightness_skyplane_lines(self):
"""
Project the 3D emission nebula line onto sky plane (return images in each emission line)
"""
if self.max_brightness > 0:
map2d = np.nansum(self._brightness_4d_cartesian, 3)
return map2d / np.nanmax(map2d[self._ref_line_id, :, :]) * self.max_brightness
else:
return None
@cached_property
def extinction_skyplane(self):
"""
Project the 3D nebula onto sky plane (for dark clouds)
"""
if self.max_extinction > 0:
map2d = np.nansum(self._brightness_3d_cartesian, 2)
return map2d / np.nanmax(map2d) * self._max_density / (1.8e21 / (u.cm ** 2 * u.mag))
else:
return None
@cached_property
def vel_field(self):
return self._get_2d_velocity()
# if vel_field is None:
# return np.atleast_1d(self.sys_velocity)
# else:
# return vel_field + self.sys_velocity
def _get_2d_velocity(self):
if hasattr(self, 'vel_gradient') and (self.vel_gradient is not None) and (self.vel_gradient != 0):
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
vel_field = (- (xx - (self.pix_width - 1) / 2) * np.sin(self.vel_pa) +
(yy - (self.pix_height - 1) / 2) * np.cos(self.vel_pa)) * self.pxscale * self.vel_gradient
return vel_field
else:
return None
# @cached_property
# def line_profile(self):
# lprf = np.zeros(shape=len(self.los_velocity), dtype=float)
# lprf[np.floor(len(lprf) / 2.).astype(int)] = 1.
# return lprf
@dataclass
class Rectangle(Nebula):
"""
Class defining a simple rectangular component.
This is equal to Nebula, but no perturbations and turbulence by default
"""
perturb_amplitude: float = 0.0 # Maximal amplitude of perturbations
turbulent_sigma: velunit = 0 * velunit # Velocity dispersion due to turbulence; included in calculations of LSF
def __post_init__(self):
self._assign_all_units()
self._assign_position_params()
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
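# Example (hedged sketch; the coordinates, sizes and brightness below are
# illustrative values, not package defaults):
#   neb = Rectangle(xc=100, yc=100, width=10 * u.pc, height=5 * u.pc,
#                   max_brightness=1e-16 * fluxunit)
#   img = neb.brightness_skyplane   # 2D surface-brightness map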
@dataclass
class Ellipse(Nebula):
"""
Class defining a simple elliptical component.
No perturbations and turbulence by default
"""
perturb_amplitude: float = 0.0 # Maximal amplitude of perturbations
turbulent_sigma: velunit = 0 * velunit # Velocity dispersion due to turbulence; included in calculations of LSF
radius: u.pc = 1.0 * u.pc # Radius along the major axis of the ellipse (or radius of the circle)
PA: u.degree = 90 * u.degree # position angle of the major axis
ax_ratio: float = 1. # ratio of minor/major axes
def __post_init__(self):
self._assign_all_units()
self._npix_los = 1
self._assign_position_params(conversion_type='ellipse')
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
brt = np.ones(shape=(self.pix_height, self.pix_width), dtype=np.float32)
angle = (self.PA + 90 * u.degree).to(u.radian).value
xct = (xx - (self.pix_width - 1) / 2) * np.cos(angle) + \
(yy - (self.pix_height - 1) / 2) * np.sin(angle)
yct = (xx - (self.pix_width - 1) / 2) * np.sin(angle) - \
(yy - (self.pix_height - 1) / 2) * np.cos(angle)
rmaj = (self.radius.to(u.pc) / self.pxscale.to(u.pc)).value
rmin = (self.radius.to(u.pc) / self.pxscale.to(u.pc)).value * self.ax_ratio
rec = (xct ** 2 / rmaj ** 2) + (yct ** 2 / rmin ** 2) >= 1
brt[rec] = 0
brt = brt.reshape((self.pix_height, self.pix_width, 1))
return brt
@dataclass
class Circle(Ellipse):
"""
Class defining a simple circular component.
"""
def __post_init__(self):
self._assign_all_units()
self.ax_ratio = 1.
self._npix_los = 1
self._assign_position_params(conversion_type='ellipse')
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@dataclass
class Filament(Nebula):
"""
Class of an isotropic cylindrical shape filament.
Defined by its position, length, PA, radius, maximal optical depth.
If it is emission-type filament, then also maximal brightness is required.
Velocity gradient also can be set up
"""
PA: u.degree = 90 * u.degree # position angle of the filament
length: u.pc = 10 * u.pc # full length of the filament
width: u.pc = 0.1 * u.pc # full width (diameter) of the filament
def __post_init__(self):
self._assign_all_units()
self._assign_position_params(conversion_type='cylinder')
self._npix_los = 1
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
brt = np.zeros_like(xx, dtype=np.float32)
xct = (xx - (self.pix_width - 1) / 2) * np.cos(self.PA + 90 * u.degree) + \
(yy - (self.pix_height - 1) / 2) * np.sin(self.PA + 90 * u.degree)
yct = (xx - (self.pix_width - 1) / 2) * np.sin(self.PA + 90 * u.degree) - \
(yy - (self.pix_height - 1) / 2) * np.cos(self.PA + 90 * u.degree)
rad = ((self.width / self.pxscale).value / 2.)
len_px = ((self.length / self.pxscale).value / 2.)
rec = (np.abs(yct) <= rad) & (np.abs(xct) <= len_px)
brt[rec] = np.sqrt(1. - (yct[rec] / rad) ** 2)
brt = brt.reshape((self.pix_height, self.pix_width, 1))
return brt
@dataclass
class _ObsoleteFilament(Nebula):
"""
Class of an isotropic cylindrical shape filament.
Defined by its position, length, PA, radius, maximal optical depth
if it is emission-type filament, then maximal brightness
NB: this class is obsolete, but might be considered later in case of implementation of varying line ratios
"""
PA: u.degree = 90 * u.degree # position angle of the filament
length: u.pc = 10 * u.pc # full length of the filament
width: u.pc = 0.1 * u.pc # full width (diameter) of the filament
vel_gradient: (velunit / u.pc) = 0 # velocity gradient along the filament (to be added)
_theta_bins: int = 50
_rad_bins: int = 0
_h_bins: int = 2
_npix_los: int = 101
def __post_init__(self):
self._assign_all_units()
if self._rad_bins == 0:
self._rad_bins = np.ceil(self.width.to(u.pc).value / self.pxscale.to(u.pc).value * 5).astype(int)
if (self.xc is not None) and (self.yc is not None):
self.x0 = self.xc - np.round((len(self._cartesian_y_grid) - 1) / 2).astype(int)
self.y0 = self.yc - np.round((len(self._cartesian_z_grid) - 1) / 2).astype(int)
elif (self.x0 is not None) and (self.y0 is not None):
self.xc = self.x0 + np.round((len(self._cartesian_y_grid) - 1) / 2).astype(int)
self.yc = self.y0 + np.round((len(self._cartesian_z_grid) - 1) / 2).astype(int)
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@cached_property
def _theta_grid(self):
return np.linspace(0, 2 * np.pi, self._theta_bins)
@cached_property
def _h_grid(self):
return np.linspace(0, self.length, self._h_bins)
@cached_property
def _rad_grid(self):
return np.linspace(0, self.width / 2, self._rad_bins)
@cached_property
def _cartesian_y_grid(self):
npix = np.ceil(1.01 * (self.length * np.abs(np.sin(self.PA)) +
self.width * np.abs(np.cos(self.PA))) / self.pxscale).astype(int)
npix_l = npix / 2 - np.ceil(self.length / 2 * np.sin(-self.PA) / self.pxscale).astype(int)
return (np.linspace(0, npix, npix + 1) - npix_l) * self.pxscale
@cached_property
def _cartesian_z_grid(self):
npix = np.ceil(1.01 * (self.length * np.abs(np.cos(self.PA)) +
self.width * np.abs(np.sin(self.PA))) / self.pxscale).astype(int)
npix_l = npix / 2 - np.ceil(self.length / 2 * np.cos(-self.PA) / self.pxscale).astype(int)
return (np.linspace(0, npix, npix + 1) - npix_l) * self.pxscale
@cached_property
def _cartesian_x_grid(self):
return np.linspace(-1.01, 1.01, self._npix_los) * self.width / 2
@cached_property
def _brightness_3d_cylindrical(self):
"""
Method to calculate brightness (or opacity) of the cloud at given theta, phi and radii
theta: float -- azimuthal angle [0, 2 * np.pi]
rad: float -- radius [0, self.width / 2]
h: float -- height [0, self.length]
Returns:
3D cube of normalized brightness in theta-rad-h grid; total brightness = 1
"""
rho, theta, h = np.meshgrid(self._rad_grid, self._theta_grid, self._h_grid, indexing='ij')
brt = np.ones_like(theta)
brt[rho > (self.width / 2)] = 0
norm = np.sum(brt)
if norm > 0:
brt = brt / np.sum(brt)
return brt
@cached_property
def _brightness_3d_cartesian(self):
x, y, z = np.meshgrid(self._cartesian_x_grid, self._cartesian_y_grid,
self._cartesian_z_grid, indexing='ij')
h_c = -y * np.sin(self.PA) + z * np.cos(self.PA)
theta_c = np.arctan2(y * np.cos(self.PA) + z * np.sin(self.PA), x)
rad_c = np.sqrt(x ** 2 + (y * np.cos(self.PA) + z * np.sin(self.PA)) ** 2)
rad_c[rad_c == 0 * u.pc] = 1e-3 * self.pxscale
theta_c = limit_angle(theta_c, 0 * u.radian, 2 * np.pi * u.radian)
ir = interp1d(self._rad_grid, np.arange(self._rad_bins), bounds_error=False)
ith = interp1d(self._theta_grid, np.arange(self._theta_bins))
ih = interp1d(self._h_grid, np.arange(self._h_bins), bounds_error=False)
new_ir = ir(rad_c.ravel())
new_ith = ith(theta_c.ravel())
new_ih = ih(h_c.ravel())
cart_data = map_coordinates(self._brightness_3d_cylindrical,
np.vstack([new_ir, new_ith, new_ih]),
order=1, mode='constant', cval=0)
return cart_data.reshape([len(self._cartesian_x_grid),
len(self._cartesian_y_grid),
len(self._cartesian_z_grid)]).T
@dataclass
class Galaxy(Nebula):
"""
Class defining the galaxy object (set up it as Sersic2D profile assuming it has continuum and emission components)
"""
PA: u.degree = 90 * u.degree # position angle of the major axis
ax_ratio: float = 0.7 # ratio of minor/major axes
r_eff: u.kpc = 1 * u.kpc # Effective radius in kpc
rad_lim: float = 3. # Maximum radius for calculations (in R_eff)
n: float = 1. # Sersic index
vel_rot: velunit = 0 * velunit # Rotational velocity (not implemented yet)
def __post_init__(self):
self._assign_all_units()
self._npix_los = 1
self.r_max = self.r_eff.to(u.pc).value / self.pxscale.to(u.pc).value * self.rad_lim
self._assign_position_params(conversion_type='galaxy')
self._ref_line_id = 0
self.linerat_constant = True # True if the ratio of line fluxes shouldn't change across the nebula
@cached_property
def _brightness_3d_cartesian(self):
"""
Method to obtain the brightness (or density) distribution of the nebula in cartesian coordinates
"""
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
angle = (self.PA + 90 * u.degree).to(u.radian).value
mod = Sersic2D(amplitude=1, r_eff=(self.r_eff.to(u.pc) / self.pxscale.to(u.pc)).value,
n=self.n, x_0=(self.pix_width - 1) / 2, y_0=(self.pix_height - 1) / 2,
ellip=1 - self.ax_ratio, theta=angle)
brt = mod(xx, yy)
xct = (xx - (self.pix_width - 1) / 2) * np.cos(angle) + \
(yy - (self.pix_height - 1) / 2) * np.sin(angle)
yct = (xx - (self.pix_width - 1) / 2) * np.sin(angle) - \
(yy - (self.pix_height - 1) / 2) * np.cos(angle)
rmaj = self.rad_lim * (self.r_eff.to(u.pc) / self.pxscale.to(u.pc)).value
rmin = self.rad_lim * (self.r_eff.to(u.pc) / self.pxscale.to(u.pc)).value * self.ax_ratio
mask = np.ones_like(brt, dtype=np.float32)
rec = (xct ** 2 / rmaj ** 2) + (yct ** 2 / rmin ** 2) >= 1
mask[rec] = 0
mask = convolve_fft(mask, kernels.Gaussian2DKernel(3.), fill_value=0, allow_huge=True)
brt = brt * mask
brt = brt.reshape(self.pix_height, self.pix_width, 1)
return brt
def _get_2d_velocity(self):
if hasattr(self, 'vel_rot') and (self.vel_rot is not None) and (self.vel_rot != 0):
xx, yy = np.meshgrid(np.arange(self.pix_width), np.arange(self.pix_height))
angle = (self.PA + 90 * u.degree).to(u.radian).value
xct = (xx - (self.pix_width - 1) / 2) * np.cos(angle) + \
(yy - (self.pix_height - 1) / 2) * np.sin(angle)
yct = (xx - (self.pix_width - 1) / 2) * np.sin(angle) - \
(yy - (self.pix_height - 1) / 2) * np.cos(angle)
rad = np.sqrt(xct ** 2 + yct ** 2)
vel_field = np.zeros_like(xx, dtype=np.float32) * velunit
rec = rad > 0
vel_field[rec] = self.vel_rot * np.sqrt(1 - self.ax_ratio ** 2) * xct[rec] / rad[rec]
return vel_field
else:
return None
@dataclass
class DIG(Nebula):
"""
Class defining the DIG component. For now it is defined just by its brightness (constant)
"""
max_brightness: fluxunit = 1e-17 * fluxunit
vel_gradient: (velunit / u.pc) = 0
@dataclass
class Cloud(Nebula):
"""Class of an isotropic spherical gas cloud without any ionization source.
Defined by its position, radius, density, maximal optical depth"""
radius: u.pc = 1.0 * u.pc
max_brightness: fluxunit = 0 * fluxunit
max_extinction: u.mag = 2.0 * u.mag
thickness: float = 1.0
perturb_degree: int = 0 # Degree of perturbations (max. degree of spherical harmonics for cloud)
linerat_constant: bool = False # True if the ratio of line fluxes shouldn't change across the nebula
_phi_bins: int = 90
_theta_bins: int = 90
_rad_bins: int = 0
_npix_los: int = 100
def __post_init__(self):
self._assign_all_units()
if self._rad_bins == 0:
self._rad_bins = np.ceil(self.radius.to(u.pc).value / self.pxscale.to(u.pc).value * 3).astype(int)
delta = np.round((len(self._cartesian_y_grid) - 1) / 2).astype(int)
if (self.xc is not None) and (self.yc is not None):
self.x0 = self.xc - delta
self.y0 = self.yc - delta
elif (self.x0 is not None) and (self.y0 is not None):
self.xc = self.x0 + delta
self.yc = self.y0 + delta
self._ref_line_id = 0
@cached_property
def _theta_grid(self):
return np.linspace(0, np.pi, self._theta_bins)
@cached_property
def _phi_grid(self):
return np.linspace(0, 2 * np.pi, self._phi_bins)
@cached_property
def _rad_grid(self):
return np.linspace(0, self.radius, self._rad_bins)
@cached_property
def _cartesian_z_grid(self):
npix = np.ceil(1.02 * self.radius / self.pxscale).astype(int)
return np.linspace(-npix, npix, 2 * npix + 1) * self.pxscale
@cached_property
def _cartesian_y_grid(self):
return self._cartesian_z_grid.copy()
@cached_property
def _cartesian_x_grid(self):
return np.linspace(-1.02, 1.02, self._npix_los) * self.radius
@cached_property
def _brightness_3d_spherical(self):
"""
Method to calculate brightness (or opacity) of the cloud at given theta, phi and radii
theta: float -- polar angle [0, np.pi]
phi: float -- azimuthal angle [0, 2 * np.pi]
rad: float -- radius [0, self.radius]
Returns:
3D cube of normalized brightness in theta-phi-rad grid; total brightness = 1
"""
rho, theta, phi = np.meshgrid(self._rad_grid, self._theta_grid, self._phi_grid, indexing='ij')
brt = np.ones_like(theta)
brt[rho < (self.radius * (1 - self.thickness))] = 0
brt[rho > self.radius] = 0
med = np.median(brt[brt > 0])
if self.perturb_degree > 0:
phi_cur = limit_angle(phi + np.random.uniform(0, 2 * np.pi, 1), 0, 2 * np.pi)
theta_cur = limit_angle(theta + np.random.uniform(0, np.pi, 1), 0, np.pi)
harm_amplitudes = self.perturb_amplitude * np.random.randn(self.perturb_degree * (self.perturb_degree + 2))
brt += np.nansum(Parallel(n_jobs=lvmdatasimulator.n_process)(delayed(brightness_inhomogeneities_sphere)
(harm_amplitudes, ll, phi_cur, theta_cur,
rho, med, self.radius, self.thickness)
for ll in np.arange(1,
self.perturb_degree + 1)),
axis=0)
brt[brt < 0] = 0
if med > 0:
brt = brt / np.nansum(brt)
return brt
@cached_property
def _brightness_4d_spherical(self):
"""
Method to calculate brightness of the cloud at given theta, phi and radii for each line
theta: float -- polar angle [0, np.pi]
phi: float -- azimuthal angle [0, 2 * np.pi]
rad: float -- radius [0, self.radius]
Returns:
4D cube of brightness in line-theta-phi-rad grid; normalized to the total brightness in Halpha
"""
s = self._brightness_3d_spherical.shape
if self.spectrum_id is None or self.linerat_constant:
return self._brightness_3d_spherical.reshape((1, s[0], s[1], s[2]))
rho, _, _ = np.meshgrid(self._rad_grid, self._theta_grid, self._phi_grid, indexing='ij')
with fits.open(lvmdatasimulator.CLOUDY_MODELS) as hdu:
radius = hdu[self.spectrum_id].data[0, 2:] * (self.thickness * self.radius) + \
self.radius * (1 - self.thickness)
fluxes = hdu[self.spectrum_id].data[1:, 2:]
radius = np.insert(radius, 0, self.radius * (1 - self.thickness))
fluxes = np.insert(fluxes, 0, fluxes[:, 0], axis=1)
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0] == 6562.81)
if self.n_brightest_lines is not None and \
(self.n_brightest_lines > 0) and (self.n_brightest_lines < len(fluxes)):
indexes_sorted = np.argsort(hdu[self.spectrum_id].data[1:, 1])[::-1]
fluxes = fluxes[indexes_sorted[:self.n_brightest_lines], :]
index_ha = np.flatnonzero(hdu[self.spectrum_id].data[1:, 0][indexes_sorted] == 6562.81)
if len(index_ha) == 1:
self._ref_line_id = index_ha[0]
brt = np.array(Parallel(n_jobs=lvmdatasimulator.n_process)(delayed(sphere_brt_in_line)
(self._brightness_3d_spherical, rho,
radius, flux)
for flux in fluxes)).reshape((fluxes.shape[0],
s[0], s[1], s[2]))
return brt / np.nansum(brt[self._ref_line_id])
@cached_property
def _brightness_3d_cartesian(self):
return interpolate_sphere_to_cartesian(self._brightness_3d_spherical, x_grid=self._cartesian_x_grid,
y_grid=self._cartesian_y_grid, z_grid=self._cartesian_z_grid,
rad_grid=self._rad_grid, theta_grid=self._theta_grid,
phi_grid=self._phi_grid, pxscale=self.pxscale)
@cached_property
def _brightness_4d_cartesian(self):
s = self._brightness_4d_spherical.shape
return np.array(Parallel(n_jobs=lvmdatasimulator.n_process)(delayed(interpolate_sphere_to_cartesian)
(cur_line_array,
self._cartesian_x_grid, self._cartesian_y_grid,
self._cartesian_z_grid, self._rad_grid,
self._theta_grid, self._phi_grid, self.pxscale)
for cur_line_array in self._brightness_4d_spherical)
).reshape((s[0], len(self._cartesian_z_grid), len(self._cartesian_y_grid),
len(self._cartesian_x_grid)))
@dataclass
class Bubble(Cloud):
"""Class of an isotropic thin expanding bubble."""
    spectral_axis: velunit = np.arange(-20, 20, 10)
# -----------------------------------------------------------------------------------------------------
# CONDOR
# Simulator for diffractive single-particle imaging experiments with X-ray lasers
# http://xfel.icm.uu.se/condor/
# -----------------------------------------------------------------------------------------------------
# Copyright 2016 <NAME>, <NAME>, <NAME>
# Condor is distributed under the terms of the BSD 2-Clause License
# -----------------------------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------------------------------
# General note:
# All variables are in SI units by default. Exceptions explicit by variable name.
# -----------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
import numpy, sys, types, pickle, time, math
import condor.utils.icosahedron as icosahedron
import condor.utils.linalg as linalg
import logging
logger = logging.getLogger(__name__)
from .log import log_and_raise_error,log_warning,log_info,log_debug
def make_sphere_map(N,nR):
"""
Generate a 3D map of a sphere particle on a regular grid (values between 0 and 1)
The result is quite rough (i.e. linear interpolation)
Args:
:N (int): Edge length of the grid in unit pixels
:nR (float): Radius in unit pixels
.. note:: This function was written for testing purposes and generates a map with rough edges. Use :class:`condor.particle.particle_sphere.ParticleSphere` for more accurate uniform sphere diffraction simulations.
"""
X,Y,Z = 1.0*numpy.mgrid[0:N,0:N,0:N]
X = X-(N-1)/2.
Y = Y-(N-1)/2.
Z = Z-(N-1)/2.
R = numpy.sqrt(X**2+Y**2+Z**2)
spheremap = numpy.zeros(shape=R.shape,dtype="float64")
spheremap[R<=nR] = 1
# Linear interpolation at the transition
spheremap[abs(nR-R)<0.5] = 0.5+0.5*(nR-R[abs(nR-R)<0.5])
return spheremap
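# Example: make_sphere_map(64, 20.) returns a 64x64x64 float array that is 1
# inside a 20-px radius, 0 outside, with a one-voxel linear ramp at the edge.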
def make_spheroid_map(N, nA, nC, rotation=None):
"""
Generate a 3D binary map of a spheroid particle on a regular grid
The result is very rough (i.e. nearest-neighbor interpolation)
Args:
:N (int): Edge length of the grid in unit pixels
:nA (float): Radius perpendicular to the rotation axis of the ellipsoid in unit pixels
:nC (float): Radius along the rotation axis of the ellipsoid in unit pixels
Kwargs:
:rotation (:class:`condor.utils.rotation.Rotation`): Rotation instance for extrinsic rotation of the icosahedron.
.. note:: This function was written for testing purposes and generates a map with rough edges. Use :class:`condor.particle.particle_spheroid.ParticleSpheroid` for more accurate uniform spheroid diffraction simulations.
"""
X,Y,Z = 1.0*numpy.mgrid[0:N,0:N,0:N]
X = X-(N-1)/2.
Y = Y-(N-1)/2.
Z = Z-(N-1)/2.
R_sq = X**2+Y**2+Z**2
e_c = numpy.array([0.0,1.0,0.0])
if rotation is not None:
e_c = rotation.rotate_vector(e_c)
d_sq_c = ((X*e_c[0])+(Y*e_c[1])+(Z*e_c[2]))**2
r_sq_c = abs( R_sq * (1 - (d_sq_c/(R_sq+numpy.finfo("float32").eps))))
spheroidmap = r_sq_c/float(nA)**2+d_sq_c/float(nC)**2
spheroidmap[spheroidmap<=1] = 1
spheroidmap[spheroidmap>1] = 0
return spheroidmap
def make_icosahedron_map(N,nRmax,extrinsic_rotation=None):
"""
Generate map of a uniform icosahedron (density = 1) on a regular grid
Orientation: The cartesian grid axis all lie parallel to 2-fold symmetry axes of the icosahedron.
Args:
:N (int): Edge length of the grid in unit pixels
:nRmax (float): Outer radius of the icosahedron in unit pixels
Kwargs:
:rotation (:class:`condor.utils.rotation.Rotation`): Rotation instance for extrinsic rotation of the icosahedron.
"""
log_debug(logger, "Building icosahedral geometry")
log_debug(logger, "Grid: %i x %i x %i (%i voxels)" % (N,N,N,N**3))
t0 = time.time()
if extrinsic_rotation is not None:
q = extrinsic_rotation.get_as_quaternion()
icomap = icosahedron.icosahedron(N,nRmax,q)
else:
icomap = icosahedron.icosahedron(N,nRmax)
t1 = time.time()
log_debug(logger, "Built map within %f seconds." % (t1-t0))
return icomap
def make_icosahedron_map_slow(N,nRmax,extrinsic_rotation=None):
"""
Generate map of a uniform icosahedron (density = 1) on a regular grid (*slow python implementation*)
Orientation: The cartesian grid axis all lie parallel to 2-fold symmetry axes of the icosahedron.
Args:
:N (int): Edge length of the grid in unit pixels
:nRmax (float): Outer radius of the icosahedron in unit pixels
Kwargs:
:rotation (:class:`condor.utils.rotation.Rotation`): Rotation instance for extrinsic rotation of the icosahedron.
"""
na = nRmax/numpy.sqrt(10.0+2*numpy.sqrt(5))*4.
nRmin = numpy.sqrt(3)/12*(3.0+numpy.sqrt(5))*na # radius at faces
log_debug(logger, "Building icosahedral geometry")
n_list = get_icosahedron_normal_vectors()
# Rotate
if extrinsic_rotation is not None:
n_list = extrinsic_rotation.rotate_vectors(numpy.array(n_list))
X,Y,Z = 1.0*numpy.mgrid[0:N,0:N,0:N]
X = X - (N-1)/2.
Y = Y - (N-1)/2.
Z = Z - (N-1)/2.
log_debug(logger, "Grid: %i x %i x %i (%i voxels)" % (N,N,N,N**3))
icomap = numpy.zeros((len(n_list),N,N,N))
# calculate distance of all voxels to all faces (negative inside, positive outside icosahedron)
for i in range(len(n_list)):
icomap[i,:,:,:] = (X*n_list[i][0]+Y*n_list[i][1]+Z*n_list[i][2])+nRmin
s = 1.
M = icomap.copy()
temp = abs(M)<0.5*s
icomap[temp] = 0.5+icomap[temp]/s
icomap[M<(-0.5)*s] = 0
icomap[M>0.5*s] = 1
icomap = icomap.min(0)
return icomap
def get_icosahedron_vertices():
"""
Return array of vertices vectors of a regular icosahedron
"""
# Weisstein, <NAME>. "Icosahedral Group." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/IcosahedralGroup.html
phi = (1+numpy.sqrt(5))/2.0
    # The original listing is truncated after the first two vertices. The loop
    # below is a reconstruction using the standard fact that the 12 vertices
    # are the cyclic permutations of (0, +/-1, +/-phi).
    verts = []
    for a in (1.0, -1.0):
        for b in (phi, -phi):
            verts.append(numpy.array([0.0, a, b]))  # includes (0, 1, phi) and (0, 1, -phi)
            verts.append(numpy.array([a, b, 0.0]))
            verts.append(numpy.array([b, 0.0, a]))
    return numpy.array(verts)
import os
import sys
import scipy.io
import scipy.misc
from nst_utils import *
import numpy as np
import cv2
import random
from tqdm import tqdm
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
model_global = None
sess_global = None
def set_config1(config):
    # Set the module-level mask-generation parameters. The original body is
    # missing below the `global` statement; the assignments here are a hedged
    # reconstruction mirroring the two presets hard-coded in gen_mask().
    global min_box_w, max_box_w, min_offset, max_offset, max_iterations
    if config == 0:
        min_box_w, max_box_w = 5, 80
        min_offset, max_offset = 35, 100
        max_iterations = 5
    else:
        min_box_w, max_box_w = 5, 15
        min_offset, max_offset = 100, 250
        max_iterations = 3
def compute_content_cost(a_C, a_G):
    # get the dimensions of tensor a_G
    m, n_H, n_W, n_C = a_G.get_shape().as_list()
    # Reshape a_C and a_G
    a_C_unrolled = tf.reshape(a_C,[n_H*n_W,n_C])
    a_G_unrolled = tf.reshape(a_G,[n_H*n_W,n_C])
    # compute the content cost
J_content = (1/(4*n_H*n_W*n_C))*tf.reduce_sum(tf.square(tf.subtract(a_C_unrolled,a_G_unrolled)))
return J_content
def gram_matrix(A):
GA = tf.matmul(A,A,transpose_b=True)
return GA
def compute_layer_style_cost(a_S, a_G):
    # get the dimensions of a_G (≈1 line)
    m, n_H, n_W, n_C = a_G.get_shape().as_list()
    # Reshape the tensors to (n_C, n_H*n_W) (≈2 lines)
    a_S = tf.reshape(tf.transpose(a_S),[n_C, n_H*n_W])
    a_G = tf.reshape(tf.transpose(a_G),[n_C, n_H*n_W])
    # compute the Gram matrices
    GS = gram_matrix(a_S)
    GG = gram_matrix(a_G)
    # compute the loss
J_style_layer = tf.reduce_sum(tf.square(tf.subtract(GS,GG)))*(1/(4*(n_C**2)*( (n_H*n_W)**2 )))
return J_style_layer
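# In formula form (G denotes the Gram matrix of the unrolled activations):
#   J_style_layer = 1 / (4 * n_C**2 * (n_H * n_W)**2) * sum_ij (GS_ij - GG_ij)**2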
STYLE_LAYERS = [
('conv1_1', 0.1),
('conv2_1', 0.1),
('conv3_1', 2.0),
('conv4_1', 1.0),
('conv5_1', 1.0)]
def compute_style_cost(sess, model, STYLE_LAYERS):
J_style = 0
for layer_name, coeff in STYLE_LAYERS:
        # get the current tensor
        out = model[layer_name]
        # get the activation of the tensor
        a_S = sess.run(out)
        # Set a_G to be the hidden layer activation from same layer. Here, a_G references model[layer_name]
        # and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
        # when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
        a_G = out
        # compute the cost for this layer
        J_style_layer = compute_layer_style_cost(a_S, a_G)
        # add the layer's coefficient-weighted cost
J_style += coeff * J_style_layer
return J_style
def total_cost(J_content, J_style, alpha = 10, beta = 80):
J = alpha*J_content + beta*J_style
return J
def model_nn(sess, model, train_step, J, J_content, J_style, input_image, num_epochs = 100):
    # initialize the variables
    sess.run(tf.global_variables_initializer())
    # Run the noisy input image (initial generated image) through the model. Use assign().
    sess.run(model['input'].assign(input_image))
    for i in tqdm(range(num_epochs)):
        # run "train_step" to minimize the total cost
        sess.run(train_step)
        # compute the generated image by running model['input']
        generated_image = sess.run(model['input'])
        # print progress information
        #if i%1000 == 0:
        #    Jt, Jc, Js = sess.run([J, J_content, J_style])
        #    print("Iteration " + str(i) + " :")
        #    print("total cost = " + str(Jt))
        #    print("content cost = " + str(Jc))
        #    print("style cost = " + str(Js))
    # save the final image
generated_image = restore_image(generated_image)
return np.squeeze(generated_image)
def print_feature_map(sess_global, model_global, layer_name, sufix):
feature_maps = sess_global.run(model_global[layer_name])
print("Saída do tensor:",feature_maps.shape)
folder_name = layer_name+sufix
for c in range(feature_maps.shape[-1]):
if not os.path.isdir(folder_name):
os.mkdir(folder_name)
file_name = folder_name+"/"+str(c)+".jpg"
if os.path.exists(file_name):
os.remove(file_name)
cv2.imwrite(file_name, feature_maps[0, :, :, c])
plt.imshow(feature_maps[0, :, :,c], cmap="gray")
plt.pause(0.1)
def run_style_tranfer(STYLE_W=None, content_image=None, style_image=None, num_epochs=100, lr=2.0, output_gray=True):
    # STYLE_W defaults to None so the existing keyword-only call sites (which
    # omit it) work; None falls back to the module-level STYLE_LAYERS.
global model_global, sess_global
print("Params:")
    if STYLE_W is None:
        STYLE_W = STYLE_LAYERS  # fall back to the module-level weights
    STYLE_LAYERS = STYLE_W  # always bind the local name so the uses below work
    print(STYLE_LAYERS)
print("lr", lr)
print("num_epochs", num_epochs)
if model_global is None:
# Reset the graph
tf.reset_default_graph()
#Intanciando a sessao
sess_global = tf.InteractiveSession()
model_global = load_vgg_model("pretrained-model/imagenet-vgg-verydeep-19.mat")
#print("loading images ...")
content_image = reshape_and_normalize_image(content_image)
#print("content image loaded")
style_image = reshape_and_normalize_image(style_image)
#print("style image loaded")
generated_image = generate_noise_image(content_image)
    # Assign the content image to the input of the VGG-19 network.
sess_global.run(model_global['input'].assign(content_image))
#-----------------------------
#print_feature_map(sess_global, model_global, 'conv1_2', 'signal')
#print_feature_map(sess_global, model_global, 'conv2_2', 'signal')
#print_feature_map(sess_global, model_global, 'conv3_4', 'signal')
#print_feature_map(sess_global, model_global, 'conv4_2', 'signal')
    # get the output tensor of conv4_2
    out = model_global['conv4_2']
    # activation output of the conv4_2 tensor
a_C = sess_global.run(out)
# Set a_G to be the hidden layer activation from same layer. Here, a_G references model['conv4_2']
# and isn't evaluated yet. Later in the code, we'll assign the image G as the model input, so that
# when we run the session, this will be the activations drawn from the appropriate layer, with G as input.
a_G = out
# Compute the content cost
J_content = compute_content_cost(a_C, a_G)
# Assign the input of the model to be the "style" image
sess_global.run(model_global['input'].assign(style_image))
# Compute the style cost
J_style = compute_style_cost(sess_global, model_global, STYLE_LAYERS)
J = total_cost(J_content, J_style)
# define optimizer (1 line)
optimizer = tf.train.AdamOptimizer(lr)
# define train_step (1 line)
train_step = optimizer.minimize(J)
    # initialize the variables
sess_global.run(tf.global_variables_initializer())
# Run the noisy input image (initial generated image) through the model. Use assign().
sess_global.run(model_global['input'].assign(generated_image))
#print("initializing style tranfer process")
final_img = model_nn(sess_global, model_global, train_step, J, J_content, J_style, generated_image, num_epochs = num_epochs)
return final_img
def gen_mask(shape, config=0):
boxes_x_list = []
    mask_image = np.full(shape, 0.7, dtype=np.float32)  # float dtype: with uint8, 0.7 would truncate to 0
cursor_1 = 5
cursor_2 = 5
min_box_w = 0
max_box_w = 0
min_offset = 0
max_offset = 0
max_iterations = 0
if config == 0:
min_box_w = 5
max_box_w = 80
min_offset = 35
max_offset = 100
max_iterations=5
else:
min_box_w = 5
max_box_w = 15
min_offset = 100
max_offset = 250
max_iterations = 3
iterations = random.randint(1, max_iterations)
while(cursor_2 < shape[1] and iterations > 0):
rand_offset = random.randint(min_offset, max_offset)
rand_box_w = random.randint(min_box_w,max_box_w)
cursor_1 = cursor_2 + rand_offset
cursor_2 = cursor_1 + rand_box_w
if cursor_1 > shape[1] or cursor_2 > shape[1]:
break
mask_image[:,cursor_1:cursor_2] = 1
boxes_x_list.append((cursor_1, cursor_2))
iterations = iterations -1
return mask_image, boxes_x_list
def generate_ugly_sismo(good_img_path, ugly_img_path, mask_list):
gen_image_list = []
for mask in mask_list:
mask_image = mask[0]
content_img = cv2.imread(good_img_path, 0)
content_img = cv2.resize(content_img, (400,300), interpolation=cv2.INTER_AREA)
content_img_masked = np.multiply(content_img, mask_image)
#content_img_masked = cv2.cvtColor(content_img_masked, cv2.COLOR_GRAY2RGB)
#imshow(content_img_masked, cmap="gray", vmin=0, vmax=255)
style_img = cv2.imread(ugly_img_path, 0)
#style_img = cv2.cvtColor(style_img, cv2.COLOR_BGR2RGB)
style_img = cv2.resize(style_img, (400,300), interpolation=cv2.INTER_AREA)
gen_image = run_style_tranfer(content_image=content_img, style_image=style_img)
#gen_image = run_style_tranfer(content_image=content_img_masked, style_image=style_img)
gen_image_list.append(gen_image)
return gen_image_list
def analyze_region(region):
#print("shape:", region.shape)
#min = np.amin(region)
#print("min", min)
#max = np.amax(region)
#print("max", max)
mean = np.mean(region)
#print("mean", mean)
return mean
def center_image(image, boxes_x, margin=10):
    centered_img = np.ndarray(shape=image.shape)
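    # NOTE: the original body is truncated after the allocation above. The
    # completion below is a hedged sketch assuming the intent is to shift the
    # image horizontally so the span covered by `boxes_x` sits centered with
    # `margin` pixels of padding; names and logic here are reconstructions,
    # not the original implementation.
    centered_img[:, :] = 255  # white background
    if boxes_x:
        left = max(min(x1 for x1, _ in boxes_x) - margin, 0)
        right = min(max(x2 for _, x2 in boxes_x) + margin, image.shape[1])
        width = right - left
        offset = (image.shape[1] - width) // 2
        centered_img[:, offset:offset + width] = image[:, left:right]
    return centered_img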
import numpy as np
import argparse, os, sys, h5py
from hfd.variables import label_df
parser = argparse.ArgumentParser(description='Add latent annotations to h5s.')
parser.add_argument('folder', type=str, help='Folder to search for h5 files.')
parser.add_argument('fontsize', type=int, help='Fontsize.')
args = parser.parse_args()
folder = args.folder
fontsize = args.fontsize
labels = ['initial_geometry', 'medial_geometry', 'final_geometry', 'all_geometry']
bof = ['atom_bof', 'atom_mod_rotations_bof']
for d, _, files in os.walk(folder):
for fname in files:
if '{}.h5'.format(fontsize) in fname:
with h5py.File(os.path.join(d, fname), 'a') as f:
for l in labels:
try:
del f[l]
except KeyError:
pass
f.create_dataset(l, data=label_df[l].values)
for l in bof:
try:
del f[l]
except KeyError:
pass
                f.create_dataset(l, data=np.stack([*label_df[l].values]))
# Routines for general quantum chemistry (no particular software package)
# Python3 and pandas
# <NAME>
#
import re, sys
#import string, copy
import copy
import numpy as np
import pandas as pd
import quaternion
from scipy.spatial.distance import cdist
from scipy import interpolate
from scipy import optimize
import matplotlib.pyplot as plt
#
# CODATA 2018 constants from physics.nist.gov, retrieved 7/13/2020
AVOGADRO = 6.02214076e23 # mol^-1 (exact, defined value)
BOLTZMANN = 1.380649e-23 # J/K (exact, defined value)
RGAS = AVOGADRO * BOLTZMANN # J/mol/K (exact)
PLANCK = 6.62607015e-34 # J s (exact, defined value)
CLIGHT = 299792458. # m/s (exact, defined value)
CM2KJ = PLANCK * AVOGADRO * CLIGHT / 10 # convert from cm^-1 to kJ/mol
CM2K = 100 * CLIGHT * PLANCK / BOLTZMANN # convert from cm^-1 to Kelvin
AMU = 1.66053906660e-27 # kg/u
HARTREE = 4.3597447222071e-18 # J; uncertainty is 85 in last two digits
AU2CM = 2.1947463136320e05 # Hartree in cm^-1; unc. is 43 in last two digits
AU2KJMOL = HARTREE * AVOGADRO / 1000. # Hartree in kJ/mol
AU2EV = 27.211386245988 # Hartree in eV; unc. is 53 in last two digits
CALORIE = 4.184 # multipy cal * CALORIE to get J
ATM_KPA = 101.325 # convert pressure in atm to kPa
EMASS = 9.1093837015e-31 # electron mass in kg; unc. is 28 in last two digits
BOHR = 0.529177210903 # Bohr radius in Angstrom; unc. is 80 in last two digits
AMU2AU = AMU / EMASS # amu expressed in a.u. (viz., electron masses)
EV2CM = AU2CM / AU2EV # eV expressed in cm^-1
EPS0 = 8.8541878128e-12 # vacuum permittivity in F/m
PI = np.pi
#
GOLD = (1 + np.sqrt(5))/2 # golden ratio
def isotopic_mass(atlabel):
# Given a label like '1-H' or 'pt195', return the atomic mass
# Data from from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl
    rxn = re.compile(r'\d+')
    rxsym = re.compile(r'[a-zA-Z]+')
n = int(rxn.search(atlabel).group(0))
sym = rxsym.search(atlabel).group(0)
Z = elz(sym)
# table of masses; major index = Z, minor = n
mtable = {1: {1: 1.00782503223, 2: 2.01410177812, 3: 3.0160492779},
2: {3: 3.0160293201, 4: 4.00260325413},
3: {6: 6.0151228874, 7: 7.0160034366},
4: {9: 9.012183065},
5: {10: 10.01293695, 11: 11.00930536},
6: {12: 12., 13: 13.00335483507, 14: 14.0032419884},
7: {14: 14.00307400443, 15: 15.00010889888},
8: {16: 15.99491461957, 17: 16.99913175650, 18: 17.99915961286},
9: {19: 18.99840316273},
16: {32: 31.9720711744, 33: 32.9714589098, 34: 33.967867004, 36: 35.96708071},
17: {35: 34.968852682, 37: 36.965902602},
35: {79: 78.9183376, 81: 80.9162897},
53: {127: 126.9044719},
78: {190: 189.9599297, 192: 191.9610387, 194: 193.9626809, 195: 194.9647917, 196: 195.96495209, 198: 197.9678949},
}
try:
m = mtable[Z][n]
except KeyError:
# invalid or just not typed here yet
m = np.nan
return m
##
def dominant_isotope(el):
# given element symbol or atomic number,
# return the mass of the most abundant isotope
# source: https://www.chem.ualberta.ca/~massspec/atomic_mass_abund.pdf,
# which cites mass data from Audi & Wapstra, Nucl. Phys. A (1993 & 1995)
# and abundance data from 1997 IUPAC report [Rosman & Taylor,
# Pure Appl. Chem. (1999)]
try:
Z = int(el)
except:
Z = elz(el)
mtable = [0, 1.007825, 4.002603, 7.016004, 9.012182, 11.009305, 12., # C
14.003074, 15.994915, 18.998403, 19.992440, 22.989770, # Na
23.985042, 26.981538, 27.976927, 30.973762, 31.972071, # S
34.968853, 39.962383, 38.963707, 39.962591, 44.955910, # Sc
47.947947, 50.943964, 51.940512, 54.938050, 55.934942, # Fe
58.933200, 57.935348, 62.929601, 63.929147, 68.925581, # Ga
73.921178, 74.921596, 79.916522, 78.918338, 83.911507, # Kr
84.911789, 87.905614, 88.905848, 89.904704, 92.906378, # Nb
97.905408, 97.907216, 101.904350, 102.905504, 105.903483, # Pd
106.905093, 113.903358, 114.903878, 119.902197, # Sn
120.903818, 129.906223, 126.904468, 131.904154, # Xe
132.905447, 137.905241, 138.906348, 139.905434, # Ce
140.907648, 141.907719, 144.912744, 151.919728, # Sm
152.921226, 157.924101, 158.925343, 163.929171, # Dy
164.930319, 165.930290, 168.934211, 173.938858, # Yb
174.940768, 179.946549, 180.947996, 183.950933, # W
186.955751, 191.961479, 192.962924, 194.964774, # Pt
196.966552, 201.970626, 204.974412, 207.976636, # Pb
208.980383, 208.982416, 209.987131, 222.017570, # Rn
223.019731, 226.025403, 227.027747, 232.038050, # Th
231.035879, 238.050783, 237.048167, 244.064198] # Pu
return mtable[Z]
##
def RRHO_symmtop(freqs, Emax, binwidth, ABC_GHz, Bunit='GHz'):
# RRHO with symmetric-top approximation.
# Use Stein-Rabinovitch counting method (less roundoff error than
# with Beyer-Swinehart)
# ** Does not account for any symmetry **
n = int(Emax/binwidth) # number of bins
nos = np.zeros(n) # number of states in each bin
nos[0] = 1 # the zero-point level
for freq in freqs:
Eladder = np.arange(freq, Emax+binwidth, freq)
iladder = np.rint(Eladder / binwidth).astype(int)
miyo = nos.copy() # temporary copy of 'nos'
# add each value in ladder to existing count in 'nos'
for irung in iladder:
for ibin in range(irung, n):
miyo[ibin] += nos[ibin - irung]
nos = miyo.copy()
# Do similar thing for the rotational levels.
E_rot, g_rot = rotational_levels_symmtop(ABC_GHz, Emax, Bunit=Bunit)
ilist = np.rint(E_rot / binwidth).astype(int).reshape(-1)
miyo = nos.copy()
for idx in range(1, len(ilist)):
# Loop over this index, instead of the 'iladder' values,
# to find the matching rotational degeneracies.
# Start from 1 instead of 0 to skip the (non-degenerate) J=0
irung = ilist[idx]
degen = g_rot[idx]
# vectorized version
binrange = np.arange(irung, n).astype(int)
miyo[binrange] = miyo[binrange] + nos[binrange - irung] * degen
nos = miyo.copy()
# find centers of energy bins
centers = binwidth * (0.5 + np.arange(n))
return nos, centers
##
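# Illustrative call (hypothetical frequencies in cm^-1 and rotational
# constants in GHz, chosen only to show the interface):
#   nos, centers = RRHO_symmtop([1050., 1200., 2900.], Emax=3000., binwidth=10.,
#                               ABC_GHz=[9.5, 5.2, 5.1])
#   # nos[i] is the number of rovibrational states in the bin centered at centers[i]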
def rotational_levels_symmtop(ABC, Emax, Bunit='cm-1'):
# Rigid-rotor levels for a symmetric top
# Return two arrays: energies (in cm^-1) and degeneracies
# 'ABC' are the three rotational constants, either in GHz or cm^-1
# 'Emax' is the upper bound on energy, in cm^-1
ABC = np.array(ABC)
ABC[::-1].sort() # sort in descending order
if Bunit.lower() == 'ghz':
# convert ABC to cm^-1
ABC *= 1.0e7 / CLIGHT
if (ABC[0]-ABC[1] > ABC[1]-ABC[2]):
# call it prolate
B = np.sqrt(ABC[1]*ABC[2]) # geometric mean; "perpendicular"
A = ABC[0]
Jmax = int(-0.5 + 0.5 * np.sqrt(1 + 4*Emax/B))
else:
# call it oblate
B = np.sqrt(ABC[1]*ABC[0]) # geometric mean; "perpendicular"
A = ABC[2]
Jmax = int( (-B + np.sqrt(B*B+4*A*Emax)) / (2*A) )
J = np.arange(Jmax+1) # all allowed values of J, including Jmax
# K = 0 cases
E = B * J * (J + 1)
degen = 2*J + 1
# K != 0 cases
C = A-B
for J in range(1,Jmax+1):
# now J is a scalar
K = np.arange(1, J+1)
Kstack = B*J*(J+1) + C * K * K
g = 2 * (2*J+1) * np.ones_like(K)
E = np.concatenate((E, Kstack))
degen = np.concatenate((degen, g))
# sort by increasing energy
idx = np.argsort(E)
E = E[idx]
degen = degen[idx]
# filter out energies that exceed Emax
idx = np.argwhere(E <= Emax)
return E[idx], degen[idx]
##
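# Illustrative call (hypothetical near-prolate constants, in GHz):
#   E, g = rotational_levels_symmtop([9.5, 5.2, 5.1], Emax=50., Bunit='GHz')
#   # E holds level energies in cm^-1; g holds the matching degeneracies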
def rotational_levels_spherical(B, Emax, Bunit='cm-1'):
# Rigid-rotor levels for a spherical top
# Return two arrays: energies (in cm^-1) and degeneracies
# 'B' is the rotational constant, either in GHz or cm^-1
# 'Emax' is the upper bound on energy, in cm^-1
if Bunit.lower() == 'ghz':
# convert B to cm^-1
B *= 1.0e7 / CLIGHT
Jmax = int(-0.5 + 0.5 * np.sqrt(1 + 4*Emax/B))
J = np.arange(Jmax+1) # all allowed values of J, including Jmax
E = B * J * (J+1)
degen = 2*J + 1
degen *= degen # this line is the only difference from the linear case
return E, degen
##
def rotational_levels_linear(B, Emax, Bunit='cm-1'):
# Rigid-rotor levels for a linear molecule
# Return two arrays: energies (in cm^-1) and degeneracies
# 'B' is the rotational constant, either in GHz or cm^-1
# 'Emax' is the upper bound on energy, in cm^-1
if Bunit.lower() == 'ghz':
# convert B to cm^-1
B *= 1.0e7 / CLIGHT
Jmax = int(-0.5 + 0.5 * np.sqrt(1 + 4*Emax/B))
J = np.arange(Jmax+1) # all allowed values of J, including Jmax
E = B * J * (J+1)
degen = 2*J + 1
return E, degen
##
def Beyer_Swinehart(freqs, Emax, binwidth):
# Return a harmonic vibrational density of states (numpy array)
# whose index is the energy bin number.
# Also return an array of the bin center energies.
# Not vectorized
n = int(Emax/binwidth) # number of bins
nos = np.zeros(n) # number of states in each bin
nos[0] = 1 # the zero-point level
for freq in freqs:
# outer loop in BS paper
ifreq = np.rint(freq/binwidth).astype(int)
for ibin in range(ifreq, n):
# inner loop
nos[ibin] += nos[ibin - ifreq]
# find centers of energy bins
centers = binwidth * (0.5 + np.arange(n))
return nos, centers
##
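# Illustrative call with the same hypothetical frequencies as above
# (vibrations only, so the counts are much sparser than the RRHO version):
#   nos, centers = Beyer_Swinehart([1050., 1200., 2900.], Emax=3000., binwidth=10.)
#   # nos.sum() is the total number of vibrational states below Emax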
def thermo_RRHO(T, freqs, symno, ABC_GHz, mass, pressure=1.0e5):
    # Return S, Cp, and [H(T)-H(0)] at the specified temperature
    lnQ = lnQvrt(T, freqs, symno, ABC_GHz, mass, pressure=pressure)
    d = lnQvrt(T, freqs, symno, ABC_GHz, mass, deriv=1) # derivative of lnQ
    dTlnQ = T * d + lnQ # derivative of TlnQ
    S = RGAS * (dTlnQ - np.log(AVOGADRO) + 1)
    d2 = lnQvrt(T, freqs, symno, ABC_GHz, mass, deriv=2) # 2nd derivative of lnQ
    d2TlnQ = 2 * d + T * d2 # 2nd derivative of TlnQ
    Cp = RGAS + RGAS * T * d2TlnQ
    ddH = RGAS * T * (1 + T * d) / 1000 # kJ/mol
    return (S, Cp, ddH)
##
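# Illustrative call: standard-state thermodynamic functions of H2O(g) at
# 298.15 K, using approximate literature frequencies (cm^-1), rotational
# constants (GHz), and mass (u); treat the numbers as round-offs, not data:
#   S, Cp, ddH = thermo_RRHO(298.15, np.array([1595., 3657., 3756.]),
#                            2, [835.8, 435.3, 278.1], 18.011)
#   # S and Cp in J/(mol K); ddH = H(298.15) - H(0) in kJ/mol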
def lnQvrt(T, freqs, symno, ABC_GHz, mass, pressure=1.0e5, deriv=0):
# Return the total (vib + rot + transl) ln(Q) partition function
# or a derivative. RRHO approximation
lnQv = lnQvib(T, freqs, deriv=deriv)
lnQr = lnQrot(T, symno, ABC_GHz, deriv=deriv)
lnQt = lnQtrans(T, mass, pressure=pressure, deriv=deriv)
lnQ = lnQv + lnQr + lnQt
return lnQ
##
def lnQtrans(T, mass, pressure=1.0e5, deriv=0):
# Given a temperature (in K), a molecular mass (in amu),
# and optionally a pressure (in Pa), return ln(Q), where
# Q is the ideal-gas translational partition function.
# If deriv > 0, return a (1st or 2nd) derivative of TlnQ
# instead of lnQ.
if deriv == 1:
# return (d/dT)lnQ = (3/2T)
return (1.5 / T)
if deriv == 2:
# return (d2/dT2)lnQ = -(3/2T**2)
return (-1.5 / (T*T))
kT = BOLTZMANN * T # in J
m = mass * AMU # in kg
V = RGAS * T / pressure # in m**3
lnQ = 1.5 * np.log(2 * PI * m * kT)
lnQ -= 3 * np.log(PLANCK)
lnQ += np.log(V)
return lnQ
##
def lnQrot(T, symno, ABC_GHz, deriv=0):
# Given a temperature (in K), symmetry number, and list of
# rotational constants (in GHz), return ln(Q), where Q is
# the rigid-rotor partition function.
n = len(ABC_GHz)
if n == 0:
# atom; no rotations possible
return 0.
if deriv == 1:
# first derivative of lnQ depends only on temperature
if n < 3:
# linear case
return (1/T)
else:
# non-linear
return (1.5/T)
if deriv == 2:
# second derivative of lnQ
if n < 3:
# linear case
return (-1 / (T*T))
else:
# non-linear
return (-1.5 / (T*T))
ln_kTh = np.log(T) + np.log(BOLTZMANN) - np.log(PLANCK) # ln(kT/h) expressed in ln(Hz)
if n < 3:
# linear molecule
B = ABC_GHz[0] * 1.0e9 # convert to Hz
lnQ = ln_kTh - np.log(symno * B)
else:
# polyatomic molecule with 3 constants
lnQ = 1.5 * ln_kTh + 0.5 * np.log(PI) - np.log(symno)
for c in ABC_GHz:
B = c * 1.0e9 # convert to Hz
lnQ -= 0.5 * np.log(B)
return lnQ
##
def lnQvib(T, freqs, deriv=0):
# Given a temperature (in K) and array of vibrational
# frequencies (in cm^-1), return ln(Q) where Q is
# the harmonic-oscillator partition function.
kTh = T * BOLTZMANN / PLANCK # kT/h expressed in Hz
lnQ = 0.
nu = freqs * 100 # convert to m^-1 (as array)
nu = nu * CLIGHT # convert to Hz
fred = nu / kTh # reduced frequencies
x = np.exp(-fred) # exponentiated, reduced frequencies
xm1 = 1 - x
if deriv == 1:
# derivative of lnQ
term = nu * x / xm1
d = term.sum()
return (d / (kTh*T))
if deriv == 2:
# 2nd derivative of lnQ
t1 = nu * (1/xm1 - 1)
sum1 = -2 * t1.sum() / (kTh * T * T)
t2 = nu * nu * x / (xm1 * xm1)
sum2 = t2.sum() / (kTh * kTh * T * T)
return (sum1 + sum2)
# return lnQ itself
lnq = np.log(xm1)
lnQ = -1 * lnq.sum()
return lnQ
##
def typeCoord(crds):
# 'Geometry' (a Geometry object)
# 'cartesian' (a list of elements and list/array of cartesians)
# 'ZMatrix' (a ZMatrix object)
if isinstance(crds, Geometry):
intype = 'Geometry'
elif isinstance(crds, ZMatrix):
intype = 'ZMatrix'
elif isinstance(crds, list) and (len(crds) == 2) and (
(len(crds[0]) == len(crds[1])) or (len(crds[0]) * 3 == len(crds[1])) ):
# 'cartesian' is plausible
intype = 'cartesian'
else:
print_err('autodetect')
return intype
##
def parse_ZMatrix(zlist, unitR='angstrom', unitA='degree'):
# Given a list of all the lines of a z-matrix,
# return a ZMatrix object
el = []
refat = []
var = []
val = {}
intop = True
maxlen = 0 # keep track of max number of words in line,
# because its decrease will signal the beginning of the
# second section of the z-matrix (if any)
    regexSplit = re.compile(r'[\s,=]+')
    for line in zlist:
        line = line.strip()
        if not line:
            continue # ignore blank lines (re.split always returns at least one token, so a length check cannot catch them)
        words = regexSplit.split(line) # split on whitespace, comma, or equals
        nwords = len(words)
        maxlen = max(maxlen, nwords)
if nwords < maxlen:
intop = False
if intop:
# list of atoms and variable names (or floats)
# add element symbol
el.append(words[0])
# add variable (str|float)'s
var.append([])
for i in range(2, nwords, 2):
try:
var[-1].append(float(words[i]))
except:
# symbolic z-matrix variable (str type)
var[-1].append(words[i])
# add list of atoms to which variables refer
refat.append([])
for i in range(1, nwords, 2):
refat[-1].append(int(words[i]) - 1) # subtract one from user-viewed index
else:
# values of any z-matrix variables
val[words[0]] = float(words[1])
ZM = ZMatrix(el, refat, var, val, unitR=unitR, unitA=unitA)
return ZM
##
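# Illustrative input: a symbolic z-matrix for water, written so that the
# third atom is bonded to the second (as ZMatrix.toGeometry() assumes):
#   zlist = ['H',
#            'O 1 roh',
#            'H 2 roh 1 ahoh',
#            'roh = 0.957',
#            'ahoh = 104.5']
#   zm = parse_ZMatrix(zlist, unitR='angstrom', unitA='degree')
#   geom = zm.toGeometry()   # cartesian Geometry object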
class ZMatrix(object):
# symbolic or numerical z-matrix
# initialize empty and then add to it
# indices are zero-based but user will be one-based
    def __init__(self, el=None, refat=None, var=None, val=None, vtype=None, unitR='angstrom', unitA='radian'):
        # this structure corresponds with the usual way of writing
        # a z-matrix, with one atom defined per line
        # (None defaults avoid the shared-mutable-default pitfall)
        self.el = el if el is not None else []           # element symbols; should be in correct order
        self.refat = refat if refat is not None else []  # list of [list of ref. atoms that define position of this atom]
        self.var = var if var is not None else []        # list of [list of z-matrix vars/constants that define this atom pos.]
        self.val = val if val is not None else {}        # dict of float values of any symbolic z-matrix variables
        self.vtype = vtype if vtype is not None else {}  # dict of names of variable types ('distance', 'angle', 'dihedral')
        self.unitR = unitR # for distances
        self.unitA = unitA # for angles and dihedrals ('radian' or 'degree')
        self.coordtype = 'ZMatrix'
        self.charge = None # optional
        self.spinmult = None # optional
        if len(self.val) != len(self.vtype):
            # generate the vtype's automatically
            self.vtypeBuild()
def vtypeBuild(self):
# categorize the variables
# this is important because they have different units
category = ['distance', 'angle', 'dihedral']
for iat in range(self.natom()):
# loop over atoms
for ivar in range(len(self.var[iat])):
# loop over names of z-matrix variables for this atom
# it's left-to-right, so vars are in the order in 'category'
v = self.var[iat][ivar] # name of a variable
if ivar > 2:
self.vtype[v] = 'unknown'
else:
self.vtype[v] = category[ivar]
return
def varMask(self, varlist):
# given a list of z-matrix variable names, return a numpy array of Boolean
# showing which indices [from ZMatrix.fromVector()] correspond
blist = []
for var in sorted(self.val):
blist.append(var in varlist)
return np.array(blist)
def canonical_angles(self):
# shift all dihedral angles into the range (-pi, pi]
for varname in self.val:
if self.vtype[varname] == 'dihedral':
self.val[varname] = angle_canon(self.val[varname], unit=self.unitA)
return
def cap_angles(self):
# force all bond angles to be in the range (0, pi)
for varname in self.val:
if self.vtype[varname] == 'angle':
if self.unitA == 'degree':
if self.val[varname] >= 180.:
self.val[varname] = 179.9
if self.val[varname] < 0.:
self.val[varname] = 0.1
else:
# radian
if self.val[varname] >= PI:
self.val[varname] = PI - 0.0002
if self.val[varname] < 0.:
self.val[varname] = 0.0002
return
def adjust_dTau(self, dX):
# given a vector of coordinate differences, move
# dihedral angle differences into the range (-pi, pi]
i = 0
for k in sorted(self.val):
if self.vtype[k] == 'dihedral':
dX[i] = angle_canon(dX[i], unit=self.unitA)
i += 1
return dX
def toRadian(self):
# make sure all angles/dihedrals are in radian
if self.unitA == 'degree':
for v in self.val:
if self.vtype[v] in ['angle', 'dihedral']:
self.val[v] = np.deg2rad(self.val[v])
self.unitA = 'radian'
return
def toDegree(self):
# make sure all angles/dihedrals are in degree
if self.unitA == 'radian':
for v in self.val:
if self.vtype[v] in ['angle', 'dihedral']:
self.val[v] = np.rad2deg(self.val[v])
self.unitA = 'degree'
return
def toAngstrom(self):
# make sure all distances are in angstrom
if self.unitR == 'bohr':
for v in self.val:
if self.vtype[v] == 'distance':
self.val[v] *= BOHR
self.unitR = 'angstrom'
return
def toBohr(self):
# make sure all distances are in bohr
if self.unitR == 'angstrom':
for v in self.val:
if self.vtype[v] == 'distance':
self.val[v] /= BOHR
self.unitR = 'bohr'
return
def unitX(self):
# return (tuple) of units
return (self.unitR, self.unitA)
def toUnits(self, unitS):
# given (unitR, unitA), in either order, convert to those units
if 'angstrom' in unitS:
self.toAngstrom()
if 'bohr' in unitS:
self.toBohr()
if 'degree' in unitS:
self.toDegree()
if 'radian' in unitS:
self.toRadian()
return
def varlist(self):
# return a list of the variable names in standard (sorted) order
vlist = [k for k in sorted(self.val)]
return vlist
def toVector(self):
# return a numpy array containing the values of the coordinates
# they are sorted according to their names
vec = [self.val[k] for k in sorted(self.val)]
return np.array(vec)
def dict2vector(self, dictin):
# given a dict with keys that are the z-matrix variables,
# return a numpy array of the values (after sorting by name)
# there is no checking!
vec = [dictin[k] for k in sorted(self.val)]
return np.array(vec)
def vector2dict(self, vecin):
# given a vector, return a dict that has keys that
# are the z-matrix variables (sorted by name)
# No checking!
i = 0
dictout = {}
for k in sorted(self.val):
dictout[k] = vecin[i]
i += 1
return dictout
def fromVector(self, vec, unitS, add=False):
# replace current coordinates with those in 'vec' (list-like)
# if 'add' is true, add to coordinates instead of replacing
if unitS != self.unitX():
# convert ZMatrix units, then convert back
old_units = self.unitX()
self.toUnits(unitS)
unitS = False # use as a flag
i = 0
for k in sorted(self.val):
if add:
self.val[k] += vec[i]
else:
self.val[k] = vec[i]
i += 1
if unitS == False:
# convert units back
self.toUnits(old_units)
return
def toGeometry(self):
# generate Cartesian coordinates; return a Geometry object
# assume that the z-matrix makes sense; no checking!
newGeom = Geometry(units=self.unitR) # empty
#newGeom.units = self.unitR # angstrom or bohr
for i in range(self.natom()):
elem = self.el[i]
if i == 0:
# place first atom at the origin
newGeom.addatom(Atom(elem, [0.,0.,0.]))
elif i == 1:
# place second atom on the z-axis
zvar = self.var[i][0]
z = self.val[zvar]
newGeom.addatom(Atom(elem, [0.,0.,z]))
elif i == 2:
                # place third atom in XZ plane
                # (assumes this atom is bonded to atom 2, with its angle measured to atom 1)
zvar = self.var[i][0] # distance
r = self.val[zvar]
zvar = self.var[i][1] # angle
theta = self.val[zvar]
if self.unitA == 'degree':
theta = np.deg2rad(theta)
z += -r * np.cos(theta) # displace from second atom
x = r * np.sin(theta)
newGeom.addatom(Atom(elem, [x,0.,z]))
else:
zvar = self.var[i][0] # distance
r = self.val[zvar]
zvar = self.var[i][1] # angle
theta = self.val[zvar]
zvar = self.var[i][2] # dihedral
phi = self.val[zvar]
if self.unitA == 'degree':
theta = np.deg2rad(theta)
phi = np.deg2rad(phi)
# find the three connected atoms (D-C-B-A) and get their coordinates
C = self.refat[i][0] # index of bonded atom
B = self.refat[i][1]
A = self.refat[i][2]
C = newGeom.atom[C].xyz
B = newGeom.atom[B].xyz
A = newGeom.atom[A].xyz
BC = C - B # vector from B to C
BA = A - B # vector from B to A
N = np.cross(BC, BA) # normal to plane ABC
# construct position for new atom
xp = normalize(np.cross(N, BC)) # unit vector toward A perp. to BC
yp = normalize(N)
dp = xp * np.cos(phi) + yp * np.sin(phi) # within plane perp. to BC
dp *= np.sin(theta)
zp = normalize(BC)
dp -= zp * np.cos(theta)
D = normalize(dp, length=r) + C
newGeom.addatom(Atom(elem, D))
return newGeom
def copy(self):
return copy.deepcopy(self)
def natom(self):
# number of atoms
return len(self.el)
def nDOF(self):
# number of degrees of freedom
return len(self.val)
def checkVals(self, verbose=True):
# check that all variables are defined
# print error message(s) if 'verbose' is True
errcount = 0
for v in [varname for varlist in self.var for varname in varlist]:
# loop over all variable names
if not v in self.val:
# missing variable
errcount += 1
if verbose:
print('*** Missing value for variable {:s} in Z-matrix'.format(v))
return errcount
def printstr(self, unitR='angstrom', unitA='degree'):
# print to a string, in specified units
pstr = ''
# first the list of atoms and variable names
for i in range(self.natom()):
pstr += self.el[i] # element symbol
for j in range(len(self.refat[i])):
pstr += ' {:d}'.format(self.refat[i][j] + 1) # +1 index offset for user viewing
try:
pstr += ' {:f}'.format(self.var[i][j]).rstrip('0') # omit trailing zeros
except:
# not a float; should be str
pstr += ' {:s}'.format(self.var[i][j])
pstr += '\n'
# last the list of variable values in requested units
pstr += '\n' # blank line
# find longest variable name, just to make the output pretty
wlong = max([len(varname) for varname in self.val])
for v in [varname for varlist in self.var for varname in varlist]:
# loop over all variable names, in order by atom
if v in self.val:
value = self.val[v]
if self.vtype[v] in ['angle', 'dihedral']:
if self.unitA != unitA:
# convert to requested unit for display
if unitA == 'degree':
value = np.rad2deg(value)
else:
value = np.deg2rad(value)
else:
# distance variable
if self.unitR != unitR:
# convert unit
if unitR == 'angstrom':
value *= BOHR
else:
value /= BOHR
pstr += '{:{width}s} {:f}'.format(v, value, width=wlong).rstrip('0') + '\n' # keep the decimal point
return pstr
def print(self):
# print to stdout
print(self.printstr())
return
def print_gradient(self, grad):
# assuming alphabetical ordering of variable names, print gradient
wlong = max([len(varname) for varname in self.val])
ivar = 0
for varname in sorted(self.val):
print('{:{width}s} {:f}'.format(varname, grad[ivar], width=wlong))
ivar += 1
def connection_table(self, tol=1.3):
# return a connection table
return self.toGeometry().connection_table(tol=tol)
def extended_connection_table(self, tol=1.3):
# return an extended connection table
return self.toGeometry().extended_connection_table(tol=tol)
def Coulomb_mat(self, select=0, bondtol=1.3):
# return a (possibly restricted) Coulomb matrix
return self.toGeometry().Coulomb_mat(select=select, bondtol=bondtol)
def separateNonbonded(self, tol=1.3):
# return a list of Geometry objects that are completely connected
return self.toGeometry().separateNonbonded(tol=tol)
def printXYZ(self, fname='', comment=''):
# write an Xmol XYZ file
self.toGeometry().printXYZ(fname, comment=comment)
return
def XmolXYZ(self, comment=''):
# return a string in Xmol's XYZ format
return self.toGeometry().XmolXYZ(comment)
##
def elz(ar, choice=''):
# return atomic number given an elemental symbol, or
# return elemental symbol given an atomic number
# If 'choice' is specified as 'symbol' or 'Z', return that.
# if 'ar' is a list, then return a corresponding list
symb = ['n',
'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar',
'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni',
'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr',
'Rb', 'Sr', 'Y', 'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd',
'Ag', 'Cd', 'In', 'Sn', 'Sb', 'Te', 'I', 'Xe',
'Cs', 'Ba',
'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd', 'Tb',
'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu',
'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt',
'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn',
'Fr', 'Ra',
'Ac', 'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk',
'Cf', 'Es', 'Fm', 'Md', 'No', 'Lr',
'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt',
'Ds', 'Rg', 'Cn', 'Nh', 'Fl', 'Mc', 'Lv', 'Ts', 'Og']
if type(ar) == str and not re.match(r'^\d+$', ar):
# this looks like an element symbol
ar = ar.title() # Title Case
if choice == 'symbol':
return ar
else:
if ar not in symb:
print_err('', '{:s} is not an element symbol'.format(ar))
else:
return symb.index(ar)
if type(ar) == list:
# process a list of atoms
vals = []
for el in ar:
vals.append(elz(el, choice))
return vals
# if we got here, the argument is an atomic number
try:
Z = int(ar)
except:
        print('Error taking int of ar =', ar, type(ar), 'in elz()')
return None
if choice == 'Z':
return Z
else:
try:
return symb[Z]
        except IndexError:
print_err('', 'No element symbol for Z = {:d}'.format(Z))
##
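# Examples:
#   elz('Fe')              # -> 26
#   elz(26)                # -> 'Fe'
#   elz(['H', 'O'], 'Z')   # -> [1, 8]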
def n_core(atno, code=''):
# given Z value (or element symbol) return number of core electrons
# if 'atno' is a stoichiometric dict of {'el' : number}, then return the sum for
# the whole molecule
# if the optional argument, 'code', is specified, the number will be the default
# for that quantum chemistry code
ncore = 0
if type(atno) == str:
# convert symbol to Z value
atno = elz(atno)
if type(atno) == dict:
# a molecular formula
for el, natom in atno.items():
ncore += n_core(el) * natom
return ncore
if code == 'gaussian09':
# default for Gaussian09 frozen-core calculations
core = {
# these are the minimum atomic numbers (Z) that have
            # the given number of core electrons (Z : ncore)
3 : 2,
11 : 10,
19 : 18,
37 : 36,
55 : 54, # this is a guess
87 : 86 # this is a guess
}
else:
core = {
# these are the minimum atomic numbers (Z) that have
            # the given number of core electrons (Z : ncore)
3 : 2,
11 : 10,
19 : 18,
31 : 28,
37 : 36,
49 : 46,
55 : 54,
81 : 78,
87 : 86
}
for ki in sorted(core):
if atno >= ki:
ncore = core[ki]
return ncore
##
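# Examples (default core definitions):
#   n_core('Fe')              # -> 18
#   n_core({'C': 2, 'H': 6})  # -> 4 (two carbons with 2 core electrons each)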
def read_regex(regex, fhandl, idx=1):
    # Return something from a line matching a regular expression.
# First arg is the regular expression; idx is the match-group
# to return. Return a list of values from all matching lines.
fhandl.seek(0)
matches = []
regx = re.compile(regex)
for line in fhandl:
mch = regx.search(line)
if mch:
matches.append(mch.group(idx))
return matches
##
def spinname(m):
# given a spin multiplity (m = 2S+1), return the text name (or the reverse)
name = [ 'spinless', 'singlet', 'doublet', 'triplet', 'quartet', 'quintet', 'sextet',
'septet', 'octet', 'nonet', 'decet', 'undecet', 'duodecet' ]
try:
m = int(m)
if m in range(12):
return name[m]
else:
return str(m) + '-tet'
except:
# convert a string into the corresponding multiplicity
return name.index(m)
##
def max_not_exceed(bigser, target):
# args are: (1) a pandas Series
# (2) a target value
# return the largest value in 'bigser' that does not exceed 'target'
# This is useful for matching up line numbers.
smaller = bigser[bigser <= target]
return smaller.max()
##
def match_lineno(targno, numlist):
# return the index of the largest value in 'numlist' that does not exceed 'targno'
# This is for matching up line numbers.
a = np.array(numlist)
idx = np.argwhere(a <= targno)
i = idx.max()
return i
##
def hartree_eV(energy, direction='to_eV', multiplier=1):
# convert from hartree to eV or the reverse (if direction == 'from_eV')
if direction == 'to_eV':
return multiplier * energy * AU2EV
elif direction == 'from_eV':
return multiplier * energy / AU2EV
else:
# illegal direction
return 'unrecognized direction = {:s} in routine hartree_eV'.format(direction)
##
def starting_n(Ltype, nppe=0):
# given an orbital-angular momentum type ('s', 'p', etc.),
# return the lowest possible principal quantum number (1, 2, etc.)
# The optional second argument is the number of electrons that have
# been replaced by an ECP/pseudopotential
# This routine only handles the common cases
nmin = {'s': 1, 'p': 2, 'd': 3, 'f': 4, 'g': 5, 'h': 6}
cases = [2, 10, 18, 28, 36, 46, 54, 60, 68, 78, 92]
if nppe > 0:
# Some electrons have been replaced by ECP; adjust the explicit
# shell numbers accordingly
if (not nppe in cases):
print('*** Unhandled number of ECP-replaced electrons ***')
print('\tnppe = {:d} in routine "starting_n"'.format(nppe))
# But go ahead and apply the algorithm, anyway!
# determine number of shells replaced
rcore = {'s': 0, 'p': 0, 'd': 0, 'f':0}
resid = nppe
nf = (resid - 28) // 32 # number of f shells replaced
if nf > 0:
rcore['f'] = nf
resid -= nf * 14
nd = (resid - 10) // 18 # number of d shells replaced
if nd > 0:
rcore['d'] = nd
resid -= nd * 10
        n_p = (resid - 2) // 8 # number of p shells replaced (renamed so it cannot shadow the numpy alias 'np')
        if n_p > 0:
            rcore['p'] = n_p
            resid -= n_p * 6
ns = resid // 2 # number of s shells replaced
rcore['s'] = ns
resid -= ns * 2
if resid != 0:
print('*** Unexpected residual electrons in routine "starting_n" ***')
for L in rcore:
nmin[L] += rcore[L]
return nmin[Ltype.lower()]
##
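# Examples:
#   starting_n('d')           # -> 3 (3d is the first d shell)
#   starting_n('s', nppe=10)  # -> 3 (a 10-electron ECP replaces 1s, 2s and 2p)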
def L_degeneracy(Ltype):
# given an orbital-angular momentum type ('s', 'p', etc.),
# return the degeneracy (1, 3, etc.)
degen = {'s': 1, 'p': 3, 'd': 5, 'f': 7, 'g': 9, 'h': 11, 'i': 13}
return degen[Ltype.lower()]
##
def combine_MOspin(df, col1='Orbital', col2='Spin', colnew='MO'):
# Given a pandas DataFrame, combine a numeric 'Orbital' field with
# a 'Spin' field ('alpha' or 'beta') to create a new 'MO' field
# that is a combination like '1a' or '5b'.
# Return that new DataFrame.
abbrev = {'alpha': 'a', 'beta': 'b', 'both': ''}
dfret = df.copy()
dfret[colnew] = df.apply(lambda x: str(x[col1])+abbrev[x[col2]], axis=1)
return dfret
##
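# Illustrative use with a minimal DataFrame:
#   df = pd.DataFrame({'Orbital': [1, 2], 'Spin': ['alpha', 'beta']})
#   combine_MOspin(df)['MO'].tolist()   # -> ['1a', '2b']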
class Atom(object):
# element symbol + cartesian coordinates + optional mass (default = 0)
def __init__(self, el, xyz, mass=0):
# 'el' : Element symbol or atomic number
# 'xyz': cartesian coordinates as list or numpy array
# 'mass': atomic mass in amu
self.el = elz(el, choice='symbol')
self.xyz = np.array(xyz, dtype=np.float64)
self.mass = mass
def Z(self):
# atomic number
return elz(self.el, 'Z')
def copy( self ):
if type(self).__name__ == 'LabeledAtom':
newatom = LabeledAtom(self.el, self.xyz, self.mass, self.label)
else:
# regular Atom object
newatom = Atom(self.el, self.xyz, self.mass)
return newatom
def newxyz(self, triple):
# replace current coordinates
self.xyz = np.array(triple)
return
def addxyz(self, triple):
# add to current coordinates with list or array
self.xyz = self.xyz + triple
return
def rotate(self, Rmat):
        # multiply the coordinates by the specified matrix
self.xyz = Rmat.dot(self.xyz)
return
def rotate_quat(self, Rquat):
# quaternion rotation using 'Rquat'
p = quaternion.from_vector_part(self.xyz)
pp = Rquat * p * Rquat.conjugate()
self.xyz = quaternion.as_vector_part(pp)
return
def rotate_sphangle(self, sphangle):
# spherical angle that defines a quaternion rotation
Rquat = quaternion.from_spherical_coords(sphangle)
self.rotate_quat(Rquat)
return
def printstr( self ):
# print to a string (exclude mass)
return '{:s}\t{:9.5f}\t{:9.5f}\t{:9.5f}'.format(self.el, self.xyz[0], self.xyz[1], self.xyz[2])
def set_mass(self, m):
# set atom mass: either a number (in amu) or an option string
try:
m = float(m)
self.mass = m
except:
if m == 'atomic_weight':
self.mass = atomic_weight(self.el)
elif m == 'dominant':
self.mass = dominant_isotope(self.el)
else:
print_err('', 'Unrecognized option, m = {:s}'.format(str(m)))
return
def distance_to(self, point):
# return the distance to the point
d = distance(self.xyz, point)
return d
def print(self):
        # print to stdout (mass not included; printstr() omits it)
print(self.printstr())
return
##
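# Illustrative use of Atom:
#   a = Atom('O', [0., 0., 0.])
#   a.set_mass('dominant')     # mass of 16-O, the most abundant isotope
#   a.addxyz([0., 0., 1.2])    # translate along z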
class LabeledAtom(Atom):
# like an Atom, but carrying a label
def __init__(self, el, xyz, mass=0, label='label'):
Atom.__init__(self, el, xyz, mass)
# label each atom simply with its ordinal number
self.label = label
def printstr(self):
# print to a string (exclude mass)
return '{:s}\t{:9.5f}\t{:9.5f}\t{:9.5f}\t{:s}'.format(self.el, self.xyz[0], self.xyz[1],
self.xyz[2], str(self.label))
def print(self):
        # print to stdout (mass not included; printstr() omits it)
print(self.printstr())
return
    @staticmethod
    def fromAtom(atom, label='label'):
# create from unlabeled Atom
newLA = LabeledAtom(atom.el, atom.xyz, atom.mass, label)
return newLA
def setLabel(self, label=''):
# change the label
self.label = label
return
##
def distance(pos1, pos2):
# return distance between two vectors (numpy)
# return NaN if the vectors have different dimensionality
if len(pos1) != len(pos2):
print('Unequal vector dimensions in "distance": dim1 = {:d}, dim2 = {:d}'.format(len(pos1), len(pos2)))
return np.nan
v = pos2 - pos1
d = np.linalg.norm(v)
return d
##
def structure_distance(Struct1, Struct2, align=True):
# Return "distance" between two structure objects
# return Nan if they are incompatible
# This is not RMSD, it is raw distance
if Struct1.coordtype != Struct2.coordtype:
# different types; distance does not make sense
return np.nan
if Struct1.natom() != Struct2.natom():
# different atom counts; distance does not make sense
return np.nan
v1 = Struct1.toVector()
if align:
v2 = RMSD_align(Struct2, Struct1).toVector()
else:
v2 = Struct2.toVector()
d = distance(v1, v2) # cartesian distance
return d
##
def angleabc(a, b, c, unit='radian'):
# return the angle a-b-c, where all are numpy arrays
v1 = a - b
v2 = c - b
    s = np.dot( v1, v2 )
    s /= np.linalg.norm(v1)
    s /= np.linalg.norm(v2)
    s = np.clip(s, -1., 1.)  # guard against roundoff slightly outside arccos's domain
    theta = np.arccos(s)
if unit == 'degree':
# requested unit is degrees
theta = np.rad2deg(theta)
return theta
##
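# Illustrative call: the H-O-H angle for a water-like arrangement
# (hypothetical coordinates in angstrom, O at the origin):
#   a = np.array([0., 0.757, 0.587])
#   b = np.zeros(3)
#   c = np.array([0., -0.757, 0.587])
#   angleabc(a, b, c, unit='degree')   # approximately 104.5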
class Geometry(object):
# a list of Atoms
# constructor does not accept masses
def __init__(self, *args, intype='1list', units='angstrom'):
# three input types are recognized:
# '2lists' : a list of elements and a list of coordinate triples
# '1list' : a list of [el, x, y, z] quadruples
# 'atlist' : a list of Atoms
# 'DataFrame' : a pandas DataFrame with four columns (Z, x, y, z)
self.coordtype = 'Geometry'
self.atom = []
self.units = units
self.charge = None # optional
self.spinmult = None # optional
self.comment = '' # optional
self.bondlist = None # filled by calls to self.bonded_list()
if len(args) == 0:
# return an empty Geometry
return
if intype == 'atlist':
# argument is already a list of Atoms
self.atom = list(args[0])
return
if intype == '1list':
# argument is a list of quadruples, [el, x, y, z]
for quad in args[0]:
at = Atom(quad[0], quad[1:4])
self.atom.append(at)
return
if intype == '2lists':
# first argument is a list of elements
# second argument is a list of triples
nsymb = len(args[0])
nxyz = len(args[1])
if nsymb != nxyz:
print('*** Inconsistent #symb = {:d} and #xyz = {:d} in Geometry initialization'.format(nsymb, nxyz))
return # empty
for iat in range(nsymb):
at = Atom(args[0][iat], args[1][iat])
self.atom.append(at)
return
if intype == 'DataFrame':
# argument is a four-column pandas DataFrame (Z, x, y, z)
for iat in range(len(args[0].index)):
elxyz = args[0].iloc[iat]
at = Atom(elxyz[0], elxyz[1:].tolist())
self.atom.append(at)
def copy(self, elements=[], atoms=[]):
# A restrictive list of elements XOR atom numbers may be provided
newgeom = self.__class__()
newgeom.units = self.units
newgeom.coordtype = self.coordtype
newgeom.charge = newgeom.spinmult = None
newgeom.comment = ''
if len(elements) > 0:
# copy only specified elements
for a in self.atom:
if (a.el in elements):
newgeom.addatom(a.copy())
elif len(atoms) > 0:
# copy only specified atoms (by index)
for i in atoms:
newgeom.addatom(self.atom[i].copy())
else:
# copy all atoms
for a in self.atom:
newgeom.addatom(a.copy())
# copy (charge, spin multiplicity, comment) only
# when we keep all the atoms
newgeom.charge = self.charge
newgeom.spinmult = self.spinmult
newgeom.comment = self.comment
# debugging
r = RMSD(self, newgeom)
if r > 1e-6:
print('RMSD with copy = ', r)
return newgeom
def addatom(self, atom):
self.atom.append(atom)
return
def append(self, geom2):
# given another Geometry object, append its atoms here
for at in geom2.atom:
self.addatom(at)
return
def delatom(self, iatom):
del self.atom[iatom]
return
def natom(self):
return len(self.atom)
def nDOF(self):
# number of degrees of freedom
return 3 * self.natom()
def set_masses(self, mlist):
# given a list of atom masses, assign these to the constituent Atoms
# If 'mlist' is a string, get masses elsewhere
if isinstance(mlist, str):
# mlist is a string
for i in range(self.natom()):
self.atom[i].set_mass(mlist)
else:
try:
if len(mlist) == self.natom():
for i in range(self.natom()):
self.atom[i].set_mass(mlist[i])
else:
print('Expected {:d} atom masses but received only {:d} in Geometry.set_masses()'.format(self.natom(), len(mlist)))
except:
# 'mlist' is not a list; assume scalar
for i in range(self.natom()):
self.atom[i].set_mass(mlist)
return
def set_atomic_weights(self):
# set each atom mass to its atomic weight
for a in self.atom:
a.set_mass('atomic_weight')
return
def mass(self):
# sum of masses of constituent atoms
m = 0
for a in self.atom:
m += a.mass
return m
def translate(self, vector):
# given a 3-vector, translate all atoms
for i in range(self.natom()):
self.atom[i].addxyz(vector)
return
def center(self, origin=np.zeros(3), use_masses=True):
# translate molecule to set center of mass at 'origin'
        # if use_masses is False, then use the geometric centroid instead of COM
C = self.COM(use_masses=use_masses)
vec = origin - C
self.translate(vec)
return
def rotate(self, Rmat):
# given a 3x3 rotation matrix, multiply all atomic coords
for A in self.atom:
A.rotate(Rmat)
return
def rotate_quat(self, Rquat):
# given a rotational quaternion, rotate the molecule
for A in self.atom:
A.rotate_quat(Rquat)
return
def rotate_sphangle(self, sphangle):
# spherical angle that defines a quaternion rotation
Rquat = quaternion.from_spherical_coords(sphangle)
self.rotate_quat(Rquat)
return
def invert(self):
# invert all coordinates
for A in self.atom:
A.xyz *= -1
return
def reflect(self, normal=[0,0,1.]):
# reflect through plane specified by its normal vector
# default is the XY plane
nrm = np.array(normal)
nrm /= np.linalg.norm(nrm)
for A in self.atom:
xnew = A.xyz - 2 * np.dot(A.xyz, nrm) * nrm
A.newxyz(xnew)
return
def scale(self, scale):
# scale (multiply) all coordinates by the specified factor
for at in self.atom:
at.xyz *= scale
return
def COM(self, use_masses=True):
# center of mass
com = np.zeros(3)
        if self.mass() == 0:
# cannot use masses
use_masses = False
if use_masses:
# ordinary center of mass
for a in self.atom:
com += a.xyz * a.mass
if a.mass == 0:
print_err('', 'atom has zero mass', halt=False)
com /= self.mass()
else:
# geometric center (no masses)
for a in self.atom:
com += a.xyz
com /= self.natom()
return com
def copyxyz(self, Geom2):
# copy the atomic coordinates from Geom2
for at, at2 in zip(self.atom, Geom2.atom):
if at.el != at2.el:
print_err('', f'Different atoms {at.el} != {at2.el}')
at.newxyz(at2.xyz)
return
def element_indices(self, elem):
# return list of indices of atoms that match 'elem'
el = elz(elem, choice='symbol')
idx = []
for i, at in enumerate(self.atom):
if el == elz(at.el, choice='symbol'):
idx.append(i)
return idx
def find_element(self, el):
# old, redundant
print('>>> this method is old and redundant')
return self.element_indices(el)
def randomize_atom_numbering(self):
# re-number atoms randomly; may be useful for software testing
idx = np.random.permutation(self.natom())
self.atom = [self.atom[i] for i in idx]
return
'''
def renumber_closest_to(self, Gref):
# renumber atoms (by element) to be closest to a reference Geometry
# no error-checking here!
idx = np.arange(Gref.natom(), dtype=int)
elems = Gref.stoichiometry(asdict=True).keys()
for el in elems:
# for each element, find closest atom
id0 = Gref.element_indices(el)
Gel0 = Gref.subMolecules([id0])[0] # sub-Geometry of element
id = self.element_indices(el)
Gel1 = self.subMolecules([id])[0]
dmat = cdist(Gel0.separateXYZ()[1], Gel1.separateXYZ()[1])
imin = np.argmin(dmat, axis=0)
idx[id0] = np.array(id)[imin]
# do the renumbering
self.renumber_atoms(idx)
return
def distance_fit_to(self, Gref, index=False):
# find smallest RMS distance to atoms of same elements (duplicate
# matches are not allowed))
# return the sum of the distances
# if 'index', also return the matching atom numbers
elems = self.stoichiometry(asdict=True).keys()
iused = []
dsq = 0
for el in elems:
# for each element, find closest atom that has not already matched
id0 = Gref.element_indices(el)
Gel0 = Gref.subMolecules([id0])[0] # sub-Geometry of element
id = self.element_indices(el)
Gel1 = self.subMolecules([id])[0]
dmat = cdist(Gel0.separateXYZ()[1], Gel1.separateXYZ()[1])
for icol in range(len(id)):
jsort = np.argsort(dmat[:, icol])
for j in jsort:
if id[j] not in iused:
# use this one
dsq += dmat[j, icol] ** 2
iused.append(id[j]) # don't use it again
break
rms = np.sqrt(dsq / self.natom())
if index:
return rms, iused
return rms
def minimize_RMSD_rotation(G, Gref):
# Brute-force (Nelder-Mead) minimization of RMSD
# return the minimized RMSD and the asociated
# rotational quaternion
# atom numbering must be consistent
res = optimize.minimize(rotated_RMSD, [0, 0], args=(G, Gref),
method='Nelder-Mead')
rmsd = res.fun
Rquat = quaternion.from_spherical_coords(res.x)
return rmsd, Rquat
##
def distance_closest_match0(self, Gref, index=False):
# find RMS distance to atoms of same elements (duplicate
# matches are not allowed))
# return the sum of the distances
# if 'index', also return the matching atom numbers
if self.stoichiometry() != Gref.stoichiometry():
print_err('', 'mismatching stoichiometries: self = {:s}, Gref = {:s}'.format(self.stoichiometry(),
Gref.stoichiometry()))
elems = self.stoichiometry(asdict=True).keys()
elem_order = Gref.separateXYZ()[0]
iused = {el: [] for el in elems}
dsq = 0
for el in elems:
# for each element, find closest atom that has not already matched
id0 = Gref.element_indices(el)
Gel0 = Gref.subMolecules([id0])[0] # sub-Geometry of element
id = self.element_indices(el)
Gel1 = self.subMolecules([id])[0]
dmat = cdist(Gel0.separateXYZ()[1], Gel1.separateXYZ()[1])
for icol in range(len(id)):
jsort = np.argsort(dmat[:, icol])
for j in jsort:
if id[j] not in iused[el]:
# use this one
dsq += dmat[j, icol] ** 2
iused[el].append(id[j]) # don't use it again
break
rms = np.sqrt(dsq / self.natom())
# put the elements in the reference order
idx = []
for el in elem_order:
idx.append(iused[el].pop(0))
if index:
return rms, idx
return rms
'''
def distance_closest_match(self, Gref, index=False):
# find RMS distance to atoms of same element with the same
# bonding environment (duplicate matches not allowed)
# return the RMS of the distances
# if 'index', also return the matching atom numbers
# this version less efficient but maybe will work
if self.stoichiometry() != Gref.stoichiometry():
print_err('', 'mismatching stoichiometries: self = {:s}, Gref = {:s}'.format(self.stoichiometry(),
Gref.stoichiometry()))
neighb = self.connected_elems()[0] # list of strings
refneig = Gref.connected_elems()[0]
if sorted(neighb) != sorted(refneig):
print(self.comment)
print(sorted(neighb))
self.printXYZ('bad.xyz')
print(Gref.comment)
print(sorted(refneig))
Gref.printXYZ('badref.xyz')
print('units:', self.unitX(), Gref.unitX())
print_err('', 'mismatching bonding environments')
idx = []
dsq = 0
dmat = cdist(self.separateXYZ()[1], Gref.separateXYZ()[1])
for icol, at in enumerate(Gref.atom):
# find closest atom (of same element) that has not already matched
jsort = np.argsort(dmat[:, icol])
for j in jsort:
jatom = self.atom[j]
if (at.el == jatom.el) and (refneig[icol] == neighb[j]) and (j not in idx):
# use this one
dsq += dmat[j, icol] ** 2
idx.append(j) # don't use it again
break
natom = self.natom()
rms = np.sqrt(dsq / natom)
if len(idx) != natom:
# not all atoms were assigned (failure)
rms = np.inf
if index:
return rms, idx
return rms
def renumber_atoms(self, newnums):
# re-number the atoms according to list 'newnums'
nlen = len(newnums)
nunique = len(set(newnums))
if nlen != nunique:
print_err('', 'Only {:d} unique atom numbers were requested'.format(nunique))
if nlen != self.natom():
print_err('', 'Only {:d} atom numbers were specified'.format(nlen))
neworder = [self.atom[i] for i in newnums]
self.atom = neworder
return
def inertia_tensor(self):
# return 3x3 inertia tensor
mvec = self.massVector()
elem, triples = self.separateXYZ()
inertia = inertia_tensor(mvec, triples)
return inertia
def rotational(self, mass=True, posdet=True):
# return rotational constants (GHz), moments of inertia (kg.m^2),
# and principal axes (columns)
# input units are assumed to be angstrom and amu
### around the center of mass ###
# if mass == False, set all atomic masses equal before calculating
# if posdet == True, require that the determinant of the eigenvector
# matrix be positive
centered = self.copy()
if not mass:
# set all masses = 1
centered.set_masses(1.)
centered.center()
imat = centered.inertia_tensor()
moment, axes = np.linalg.eigh( imat )
# convert moment to kg.m^2, assuming distances in angstrom and masses in u
moment /= 1.0e20 * AVOGADRO * 1000.0
rotconst = PLANCK / ( 8 * PI * PI * CLIGHT * moment ) # now in units (1/m)
        rotconst *= CLIGHT * 1.0e-9 # now in GHz
det = np.linalg.det(axes)
if det < 0:
# reverse the B axis
axes[:,1] *= -1
return rotconst, moment, axes
def align_principal_axes(self, Gref, mass=True, mindet=0.9, quiet=False):
# rotate so that principal axes align with those of 'Gref'
# include masses unless 'mass' == False
# return the rotation matrix
C = self.copy()
Cref = Gref.copy()
if not mass:
# set all atom masses = 1
C.set_masses(1.)
            Cref.set_masses(1.)
elif C.mass() * Cref.mass() == 0:
# masses are needed but have not been set; assign atomic weights
C.set_atomic_weights()
Cref.set_atomic_weights()
ctr = C.COM() # save the COM
C.center()
Cref.center()
# inertial axes
ABC0, I0, pax0 = Cref.rotational()
ABC1, I1, pax1 = C.rotational()
        rmat = np.dot(pax1, np.linalg.inv(pax0))  # matrix product ('*' would multiply elementwise)
# check for singularity (or negative determinant)
det = np.linalg.det(rmat)
if det < mindet:
if not quiet:
print_err('', 'rotation aborted: rmat has bad det = {:.3f}'.format(det),
halt=False)
else:
# pax1 = rmat * pax0
C.rotate(rmat)
# rotate COM and add it back
            rctr = np.dot(rmat, ctr)  # rotate with the same convention as Geometry.rotate()
C.translate(rctr)
# transfer coordinates to self
for atold, atnew in zip(self.atom, C.atom):
atold.newxyz(atnew.xyz)
return rmat
def massVector(self, tripled=False):
# return 1D vector of atomic masses
# if 'tripled', repeat each mass three times (to match coordinates)
n = 1
if tripled:
n = 3
vmass = [[a.mass]*n for a in self.atom]
vmass = np.array(vmass).flatten()
return vmass
def suppress_translation(self, direction):
# given a displacement vector, remove net translation and return the adjusted vector
# construct vector of masses
vmass = self.massVector(tripled=True)
if np.any(vmass <= 0.):
print_err('', 'an atom has non-positive mass')
transl = np.multiply(vmass, direction) / self.mass()
transl = transl.reshape(-1, 3)
center = transl.sum(axis=0)
# subtract this 'center' from the input direction
dnew = direction.reshape(-1,3) - center
return dnew.flatten()
def suppress_rotation(self, direction, thresh=0.001, maxiter=1000):
# given a displacement vector, suppress net rotation and return the adjusted vector
# crummy iterative method
v = direction.reshape(-1,3)
r = self.toVector().reshape(-1,3) # atomic positions
m = self.massVector() # atomic masses
I = ( (r*r).T * m ).T.sum() # total moment of inertia
iter = 0
while True:
L = angular_momentum(m, r, v)
Lnorm = np.linalg.norm(L)
#print('Lnorm = {:.4f} at iteration {:d}'.format(Lnorm, iter))
if Lnorm < thresh:
return v.flatten()
w = L/I # angular velocity
u = np.cross(r, w) # velocity adjustment
v += u
            iter += 1
            if iter > maxiter:
                print('*** warning: maxiter = {:d} exceeded in suppress_rotation()'.format(maxiter))
                return v.flatten()
def toAngstrom(self):
# ensure that units are angstrom
if self.units == 'bohr':
# multiply all coordinates by 'BOHR' constant
for a in self.atom:
a.xyz *= BOHR
self.units = 'angstrom'
return
def toBohr(self):
# ensure that units are bohr
if self.units == 'angstrom':
# divide all coordinates by 'BOHR' constant
for a in self.atom:
a.xyz /= BOHR
self.units = 'bohr'
return
def toUnits(self, unitS):
# given tuple of units, convert to those units
if 'angstrom' in unitS:
self.toAngstrom()
if 'bohr' in unitS:
self.toBohr()
return
def unitX(self):
# return (tuple) of units
return (self.units,)
def print(self, numbering=None):
# printing routine
# to number the atoms from N, set numbering=N
if type(self).__name__ == 'LabeledGeometry':
header = 'el\t x\t\t y\t\t z\t\tlabel'
else:
# regular Geometry object
header = 'el\t x\t\t y\t\t z'
if numbering is not None:
header += '\t\t#'
if self.units == 'bohr':
header += '\t(units=bohr)'
print(header)
if numbering is None:
for atom in self.atom:
atom.print()
else:
# print with numerical labels starting from 'numbering'
for iat, atom in enumerate(self.atom):
lbl = '{:d}'.format(numbering + iat)
LabeledAtom.fromAtom(atom, label=lbl).print()
# print any charge and spin multiplicity
try:
print('charge = {:.1f}'.format(self.charge))
except:
# not a problem
pass
try:
print('spinmult = {:.1f}'.format(self.spinmult))
except:
# not a problem
pass
return
def XmolXYZ(self, comment='', coord_only=False):
# return a string in Xmol's XYZ format
# if coord_only==True, omit the first two lines (so not Xmol format anymore)
if comment == '':
# supply a default comment line
comment = 'molecular composition is {:s}'.format(self.stoichiometry())
if self.units == 'bohr':
comment += '\t(units=bohr)'
if not coord_only:
xstr = '{:d}\n{:s}\n'.format(self.natom(), comment)
else:
xstr = ''
for a in self.atom:
xstr += '{:s}\t{:10.5f}\t{:10.5f}\t{:10.5f}\n'.format(a.el, a.xyz[0], a.xyz[1], a.xyz[2])
return xstr
def printXYZ(self, fname='', comment='', handle=False):
# print a string in Xmol's XYZ format, to file or stdout
if comment == '':
comment = self.comment
if handle:
# 'fname' is a file pointer
fname.write(self.XmolXYZ(comment=comment))
else:
# 'fname' is the name of a file or blank
if len(fname) > 0:
# print to specified file; over-write existing data
with open(fname, 'w') as f:
f.write(self.XmolXYZ(comment=comment))
else:
# print to stdout
print(self.XmolXYZ(comment=comment))
return
def separateXYZ(self):
# return a list with two elements:
# [element symbols]; [array of cartesian triples]
elem = []
triples = []
for a in self.atom:
elem.append(a.el)
triples.append(a.xyz)
return [elem, np.array(triples)]
def varlist(self):
# return a list of (formal) variable names
vlist = []
for i in range(self.natom()):
n = str(i)
vlist += ['x_'+n, 'y_'+n, 'z_'+n]
return vlist
def toVector(self):
# return a numpy array with all coordinates
elem, triples = self.separateXYZ()
return triples.flatten()
    def fromVector(self, vec, unitS, add=False):
        # given a flat vector of coordinates, replace the current coordinates
        # unitS[0] is the distance unit of the vector
        # if 'add' is True, then add to the current coordinates instead
        # of replacing them
        vec = np.array(vec, dtype=float)  # local copy, so unit conversion cannot mutate the caller's array
        if unitS[0] != self.units:
# convert vector to Geometry units
if self.units == 'angstrom':
if unitS[0] == 'bohr':
vec *= BOHR
else:
print('** unrecognized units: unitS[0] = {:s}'.format(unitS[0]))
elif self.units == 'bohr':
if unitS[0] == 'angstrom':
vec /= BOHR
else:
print('** unrecognized units: unitS[0] = {:s}'.format(unitS[0]))
else:
print("** I don't recognize my own units! self.units = {:s}".format(self.units))
triples = np.array(vec).reshape((-1,3))
for i in range(self.natom()):
if add:
self.atom[i].addxyz(triples[i])
else:
self.atom[i].newxyz(triples[i])
return
def stoichiometry(self, asdict=False):
# stoichiometry string (without charge or spin multiplicity)
# build hash of elements and their atom counts
acount = {}
for a in self.atom:
try:
acount[a.el] += 1
except:
acount[a.el] = 1
if asdict:
return acount
stoich = stoichiometry(acount)
return stoich
def distance(self, i, j, unit=''):
# distance between atoms i and j
# use unit if requested; default is not to change units
try:
d = distance(self.atom[i].xyz, self.atom[j].xyz)
except IndexError:
s = '*** Illegal atom number in Geometry.distance(): ' + \
'i = {:d}, j = {:d}'.format(i, j)
print(s)
return np.nan
if unit == 'angstrom' and self.units == 'bohr':
d *= BOHR # convert bohr to angstrom
if unit == 'bohr' and self.units == 'angstrom':
d /= BOHR # convert angstrom to bohr
return d
def vec(self, i, j, norm=None):
# return the vector pointing from atom i to atom j
        # if 'norm' is not None, then normalize the vector
# length to 'norm'
v = self.atom[j].xyz - self.atom[i].xyz
if norm is None:
return v
else:
# normalize to specified length
return normalize(v, norm)
def angle(self, i, j, k, unit='degree'):
# bond (or other) angle defined by atoms i, j, k
try:
a = angleabc(self.atom[i].xyz, self.atom[j].xyz, self.atom[k].xyz, unit=unit)
return a
except IndexError:
s = '*** Illegal atom number in Geometry.angle(): ' + \
'i = {:d}, j = {:d}, k = {:d}'.format(i, j, k)
print(s)
return np.nan
def dihedral(self, i, j, k, l, typ='linear', unit='radian'):
# calculate dihedral angle in radians (optionally in 'degree')
# typ='linear' : connectivity is i-j-k-l
# dihedral is between planes ijk and jkl
# typ='branched' : connectivity is i-j<kl (i, k and l all bonded to j)
# dihedral is between planes ijk and jkl (conforming with Avogadro)
a = self.vec(j, i)
b = self.vec(j, k)
c = self.vec(k, l)
if typ == 'branched':
c = self.vec(j, l)
b = normalize(b)
x = a - b * np.dot(a, b) # component of a normal to b
z = c - b * np.dot(c, b)
x = normalize(x)
z = normalize(z)
if ( np.linalg.norm(x) == 0.0) or ( np.linalg.norm(z) == 0.0):
# something is linear; dihedral is undefined
return np.nan
        phi = np.arccos( np.clip(np.dot(x, z), -1., 1.) ) # in range [0, pi]; clip guards against roundoff
s = np.cross(x, z) # vector cross-product to get sign of dihedral
s = np.sign( np.dot(s,b) ) # parallel or antiparallel to b
phi *= s # include sign (right-handed definition)
if s == 0:
# x and z are parallel
if np.dot(x, z) > 0:
phi = 0
else:
phi = PI
if unit == 'degree':
phi *= 180 / PI
return phi
def simple_dihedrals(self, bondtol=1.3, unit='radian'):
# Return a list of all (redundant) linear dihedral angles.
# Each list element is a tuple:
# ( (i,j,k,l), angle_value )
xconn = self.extended_connection_table(bondtol)
pairs14 = np.argwhere(xconn == 3) # pairs of atoms 3 bonds apart
aldihe = []
for il in pairs14:
[i, l] = il.tolist()
if l < i:
# list each dihedral only once
continue
j = np.intersect1d( (np.argwhere(xconn[i,:] == 1)), (np.argwhere(xconn[l,:] == 2)) ).min()
k = np.intersect1d( (np.argwhere(xconn[i,:] == 2)), (np.argwhere(xconn[l,:] == 1)) ).tolist()
blist = np.where(xconn[j,:] == 1)[0]
k = np.intersect1d(k, blist).min()
ang = self.dihedral(i, j, k, l, 'linear', unit)
aldihe.append( ((i,j,k,l), ang) )
return aldihe
def find_methyls(self, bondtol=1.3):
# return list of tuples of atom numbers (C, H, H, H)
mlist = []
conn = self.connection_table(bondtol)
for i in range(self.natom()):
if self.atom[i].Z() == 6:
# a carbon atom
h = np.argwhere(conn[i,:] == 1).flatten()
if len(h) == 4:
# tetravalent carbon
hlist = []
for j in h:
if self.atom[j].Z() == 1:
# hydrogen atom
hlist.append(j)
if len(hlist) == 3:
# a methyl group; add to list
mlist.append( (i, *hlist) )
return mlist
def bonded(self, i, j, tol=1.3):
# return True if bonded, else False (based on distance only) (3/2/10)
# 'tol' tolerated amount of bond stretching
r0 = r0_ref(self.atom[i].el, self.atom[j].el)
if self.distance(i, j, unit='angstrom') < r0 * tol:
return True
return False
def bonded_list(self, tol=1.3):
# return a list of arrays of bonded atoms (by index)
# also store it as an attribute
natom = self.natom()
connex = self.connection_table(tol=tol)
bonded = [ np.argwhere(connex[i,:]).flatten() for i in range(natom) ]
# save to attribute variable
self.bondlist = bonded
return bonded
def distmat(self, unit='', variant=''):
# 2D array of interatomic distances (distance matrix )
# use unit if specified
# if variant = 'interfragment', zero out all distances
# within a bonded fragment
xyz = [a.xyz for a in self.atom]
dmat = cdist(xyz, xyz, metric='euclidean')
if (unit == 'angstrom') and (self.units == 'bohr'):
dmat *= BOHR # convert bohr to angstrom
print('>>> dmat from bohr to angstrom')
if (unit == 'bohr') and (self.units == 'angstrom'):
dmat /= BOHR # convert angstrom to bohr
print('>>> dmat from angstrom to bohr')
if variant == 'interfragment':
# intended only for nonbonded complexes
frags = self.find_fragments()
nfrag = len(frags)
if nfrag < 2:
# there is only one fragment!
return np.zeros_like(dmat)
for frag in frags:
for i in frag:
for j in frag:
dmat[i, j] = 0.
return dmat
def distances_to(self, point):
# return list of atom distances to specified point in space
# also the distance from COM to the point
dcom = distance(self.COM(), point)
dist = [a.distance_to(point) for a in self.atom]
return dist, dcom
def connection_table(self, tol=1.3):
# return a connection table: a 2D array indicating bonded distances (= 0 or 1)
# 'tol' is bond-stretch tolerance
dmat = self.distmat(unit='angstrom') / tol
connex = np.zeros_like(dmat, dtype=int)
for i in range(self.natom()):
for j in range(i):
# j < i
if dmat[i][j] < r0_ref(self.atom[i].el, self.atom[j].el):
connex[i][j] = 1
connex[j][i] = 1
return connex
def connected_elems(self, tol=1.3):
# return a list of connected atoms formatted as stoichiometric string
# and a list of bonded atoms (by index)
connex = self.connection_table(tol=tol)
slist = []
ilist = []
for i in range(connex.shape[0]):
adict = {}
jlist = np.argwhere(connex[i,:]).flatten()
for j in jlist:
try:
adict[self.atom[j].el] += 1
except:
adict[self.atom[j].el] = 1
slist.append(stoichiometry(adict))
ilist.append(jlist)
return slist, ilist
def extended_connection_table(self, tol=1.3):
# return a 2D array where A_ij is the number of bonded
# links to get from atom i to atom j
# Zeros on the diagonal and for unconnected atom pairs
xconn = self.connection_table(tol)
natom = xconn.shape[0]
changed = True
nbond = 1
while changed:
changed = False
for i in range(natom):
for j in range(natom):
if xconn[i][j] == nbond:
# j is 'nbonds' from i
# find atoms k that are bonded to j
for k in range(natom):
if (k != i) and (k != j) and (xconn[j][k] == 1) and (xconn[i][k] == 0):
# record this distance
xconn[i][k] = xconn[k][i] = nbond + 1
changed = True
nbond += 1
return xconn
def Coulomb_mat(self, select=0, bondtol=1.3):
# return a Coulomb matrix (atomic units)
# if 'select' != 0, then the matrix is zero
# except for atom pairs separated by 'select' number of bonds
# when 'select' == 0, 'bondtol' is irrelevant
zvals = [a.Z() for a in self.atom]
zmat = np.outer(zvals, zvals)
        xconn = self.extended_connection_table(bondtol)
nat = xconn.shape[0]
if select >= nat:
print('Warning: select = {:d} exceeds atom limit in Coulomb_mat(); setting to zero'.format(select))
select = 0
dmat = self.distmat('bohr')
if select > 0:
# destroy values at wrong bonded distances
dmat[np.where(xconn != select)] = np.inf
else:
# set only diagonal to inf (so that reciprocal will be zero)
np.fill_diagonal(dmat, np.inf)
return zmat/dmat
def subMolecules(self, lolist, ltype='index'):
'''
return a list of sub-molecules
arg 'lolist' is a list of lists
'ltype' indicates meaning of lolist:
'index' is a number
'label' only makes sense for LabeledGeometry
'''
geomlist = []
for lol in lolist:
# create an empty object for each list in lolist
newG = self.__class__()
newG.units = self.units
if ltype == 'index':
# sort indices to preserve atom ordering
for i in sorted(lol):
# 'i' is just the index in self.atom[]
newG.addatom(self.atom[i])
elif (ltype == 'label') and (type(self).__name__ == 'LabeledGeometry'):
for i in lol:
# 'i' is the label; add all matching atoms
m = False # flag
for at in self.atom:
if at.label == i:
newG.addatom(at)
m = True
if not m:
# no matching atom found
print('Found no atoms with label {:s} in LabeledGeometry.subMolecules()'.format(str(i)))
else:
print('Unrecognized ltype =', ltype, 'in LabeledGeometry.subMolecules()')
return None
geomlist.append(newG)
return geomlist
def separateNonbonded(self, tol=1.3):
# return a list of Geometry objects for all disconnected fragments
fragments = self.find_fragments(tol=tol)
# create the sub-molecules
submols = self.subMolecules(fragments, ltype='index')
return submols
def paxes_dots(self, unsigned=True, tol=1.3):
# dot products of first principal axes of nonbonded fragments
# (first axis corresponds to the smallest moment/largest rot. constant)
# if 'unsigned' == True, take absolute values
# returns a list
# may be useful in distinguishing cluster geometries
Frags = self.separateNonbonded(tol=tol)
pax = [Frag.rotational()[2] for Frag in Frags]
dots = []
nfrag = len(Frags)
for i in range(nfrag):
for j in range(i+1, nfrag):
# only consider the first axis
a = np.dot(pax[i][:,0], pax[j][:,0])
if unsigned:
a = abs(a)
dots.append(a)
return dots
def fragment_distances(self, loc='nearest', tol=1.3):
# Identify non-bonded fragments, then
# return the matrix of inter-fragment distances and
# another item (depending upon 'loc' value)
# loc == 'nearest' : minimal interatomic distance
# loc == 'center' : between geometric centers (no masses)
fragments = self.find_fragments(tol=tol)
nfrag = len(fragments)
sep = np.zeros((nfrag, nfrag)) # matrix of inter-fragment distances
if nfrag == 1:
# there is nothing to do (still return two values)
return sep, sep.tolist()
if loc == 'nearest':
# find the nearest atoms between all pairs of fragments
ijDist = self.distmat()
ijNearest = np.zeros((nfrag, nfrag)).tolist() # for storing the (i,j) atom numbers
            for ifrag in range(nfrag):
                for jfrag in range(ifrag):
                    # reset the search for each fragment pair
                    mindist = np.inf
                    minj = mini = -1
                    for iat in fragments[ifrag]:
                        for jat in fragments[jfrag]:
                            if ijDist[iat][jat] < mindist:
                                # new closest pair
                                minj = jat
                                mini = iat
                                mindist = ijDist[iat][jat]
                    # record the closest atom pair for these two fragments
                    ijNearest[ifrag][jfrag] = (mini, minj)
                    ijNearest[jfrag][ifrag] = (minj, mini)
                    sep[ifrag][jfrag] = mindist
                    sep[jfrag][ifrag] = mindist
return sep, ijNearest
elif loc == 'center':
# find the distance between geometric centers
# (without mass-weighting)
cent = np.zeros((nfrag, 3)) # coordinates of fragment centers
# compute fragment centers
for ifrag in range(nfrag):
for iat in fragments[ifrag]:
cent[ifrag] += self.atom[iat].xyz
cent[ifrag] /= len(fragments[ifrag])
# compute distances between centers
for ifrag in range(nfrag):
for jfrag in range(ifrag):
sep[ifrag][jfrag] = np.linalg.norm(cent[jfrag] - cent[ifrag])
sep[jfrag][ifrag] = sep[ifrag][jfrag]
return sep, cent
else:
print_err('option', 'loc = {:s}'.format(loc))
def spread_fragments(self, dist=5.0, tol=1.3):
# displace fragments away from each other along
# closest inter-atom vectors, to distance 'dist'
# Return value is the number of fragments detected
sep, ijNearest = self.fragment_distances(loc='nearest', tol=tol)
nfrag = sep.shape[0]
if nfrag < 2:
# nothing to do
return nfrag
# compute the translation vectors
# each row in 'transl' is the translation to apply to all
# atoms in one fragment
transl = np.zeros( (nfrag, 3) )
for ifrag in range(nfrag):
for jfrag in range(ifrag):
(iat, jat) = ijNearest[ifrag][jfrag]
v12 = (self.atom[iat].xyz - self.atom[jat].xyz)
# adjust length of translation vector
curlen = np.linalg.norm(v12)
v12 = normalize(v12, (dist-curlen)/2)
transl[ifrag] += v12 # move fragment i away from fragment j
transl[jfrag] -= v12 # move fragment j away from fragment i
# apply the translations
fragments = self.find_fragments(tol=tol)
for ifrag in range(nfrag):
for iat in fragments[ifrag]:
self.atom[iat].addxyz(transl[ifrag])
return nfrag
def find_fragments(self, tol=1.3):
# return a list of [list of atom numbers] that are connected
natom = self.natom()
bonded = self.bonded_list(tol=tol)
# bonded[i] is the list of atoms that are connected to atom i (indices, not labels)
bunch = [] # list of lists; atom "bunches" that are intact molecules
remaining = list(range(natom)) # the indices of the atoms not yet assigned to a bunch
moved = False # a flag
while(len(remaining)):
if not moved:
# no atoms were moved last round; start a new bunch
seed = remaining.pop(0)
bunch.append([seed])
moved = True
for i in bunch[-1]:
moved = False
for j in bonded[i]:
if not j in bunch[-1]:
# move this atom into the current bunch
bunch[-1].append(j)
remaining.remove(j)
moved = True
return bunch
def assignTerminality(self, tol=1.3):
# assign a 'terminality' number to each atom;
# it's the number of iterations that the atom survives,
# where one iteration removes all terminal atoms
# Return a list of terminality numbers
# Atoms that can't be removed get terminality = -1
natom = self.natom()
terminality = np.zeros(natom, dtype=int)
remaining = np.arange(natom) # the indices of the atoms not yet removed
round = 0 # counter
while(len(remaining)):
# find the terminal atoms
buff = self.copy(atoms=remaining)
# count bonds
connex = buff.connection_table(tol=tol)
numbond = connex.sum(axis=0)
nonterminal = np.argwhere(numbond >= 2).flatten() # non-bonded is considered terminal
remaining = remaining[nonterminal]
terminality[remaining] += 1
round += 1
if len(remaining) == natom:
# no atoms were eliminated; only rings and linkers remain
terminality[remaining] = -1
break
else:
natom = len(remaining)
return terminality
def rings(self, minimal=False, tol=1.3):
# return a list of lists
# each sub-list is the indices of atoms in one ring
# rings are unique but may include other rings
termy = self.assignTerminality(tol=tol)
# 'nonterm' are atoms that terminus-removal cannot render terminal
# it includes all ring atoms and ring-ring linkages
nonterm = np.where(termy == -1)[0].tolist()
natom = len(nonterm) # number of atoms to consider
if natom < 3:
# no rings are possible
return []
self.bonded_list(tol=tol) # prepare self.bondlist using specified 'tol'
# follow paths until all atoms in 'nonterm' have been considered
paths = self.follow_paths(start=nonterm[0:1], restrict=nonterm)
# eliminate duplicates
rings = []
ringsets = []
for path in paths['ring']:
# is this path already in rings[] ?
pset = set(path)
if pset not in ringsets:
# add this to the list
rings.append(path)
ringsets.append(pset)
        # if requested, eliminate redundant rings
if minimal:
# eliminate redundant large rings
ringsize = [len(ring) for ring in rings]
smallrings = []
ringatoms = set()
for iring in np.argsort(ringsize):
# loop over rings from smallest to largest
rset = set(rings[iring])
if not rset.issubset(ringatoms):
# some new atoms in this ring; add it
smallrings.append(rings[iring])
ringatoms = ringatoms.union(rset)
rings = smallrings
return rings
def follow_paths(self, start=[0], restrict=None):
# Start from last atom in path 'start' and walk through the atoms
# listed in 'restrict' until cycles or dead ends are reached.
# Return lists of lists of atoms separated into three categories
# (as dict): 'ring', 'straight'
# Recursive
if restrict is None:
# default: consider all atoms in the Geometry()
restrict = list(range(self.natom()))
if self.bondlist is None:
# use default tolerance to construct self.bondlist[]
print_err('', 'Creating bonded list using defaults', halt=False)
self.bonded_list()
if start[-1] not in restrict:
print_err('', 'starting atom {:d} is not in restrict list'.format(start[-1]))
paths = {'ring': [], 'straight': []} # return value
# find the next atoms to visit
icurr = start[-1] # the current atom
if len(start) > 1:
iprev = start[-2] # the previous atom
else:
iprev = np.nan
# create a new path for each following atom
nextatoms = [iat for iat in self.bondlist[icurr] if (iat in restrict) and (iat != iprev)]
if len(nextatoms) == 0:
# current atom is a terminus; end of recursion
paths['straight'].append(start)
return paths
# walk to following atom(s)
for iat in nextatoms:
# is this next atom already in the path?
if iat in start:
# yes; truncate the path to the ring and store it
paths['ring'].append(start[start.index(iat):])
continue
# not (yet) a ring; extend the path with this new atom
pathext = start + [iat]
# here is the recursive part: add the rest of the path
tails = self.follow_paths(pathext, restrict=restrict)
paths['ring'].extend(tails['ring'])
paths['straight'].extend(tails['straight'])
return paths
def torsions(self, tol=1.3):
# find all bonds with correct connectivity for proper dihedrals
connex = self.connection_table(tol=tol)
        term = self.assignTerminality(tol=tol)
# find all bonds between non-terminal atoms
nonterm = np.where(term)[0]
subconn = np.transpose(connex[nonterm])[nonterm]
ntors = subconn.sum() // 2 # number of torsions
print('Found {:d} torsions'.format(ntors))
# make list of central atom pairs
pairs = []
(ilist, jlist) = np.where(subconn)
for i, j in zip(ilist, jlist):
# these indices show a bond
if i < j:
# don't include a bond twice
pairs.append([nonterm[i], nonterm[j]])
        print('pairs:', pairs)
        return pairs
def bounding_sphere(self):
# return the center and radius of a "smallest" sphere enclosing the nuclei
xyz = self.separateXYZ()[1]
return small_enclosing_sphere(xyz)
##
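# Illustrative usage sketch (not part of the original API surface): typical use
# of the connectivity methods above, assuming a small water-like Geometry built
# with xyz2Geometry() (defined later in this module).
#   g = xyz2Geometry([8, 1, 1], [0., 0., 0., 0.96, 0., 0., -0.24, 0.93, 0.])
#   g.connection_table()   # 3x3 ints; the two O-H pairs are marked 1
#   g.find_fragments()     # [[0, 1, 2]] -- a single bonded fragment
##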
class LabeledGeometry(Geometry):
# like a Geometry, but composed of LabeledAtom instead of Atom
def __init__(self, *args, intype='atlist', labels='', units='angstrom', istart=0):
# specify labels = 'present' if the atoms are already labeled
Geometry.__init__(self, *args, intype=intype, units=units)
if labels == 'present':
# atoms are already labeled
pass
else:
natom = self.natom()
for i in range(natom):
# replace each Atom with a LabeledAtom
if len(labels) >= natom:
# user-supplied list of atom labels
self.atom[i] = LabeledAtom.fromAtom(self.atom[i], labels[i])
else:
# use the atom number (starting from 'istart') as the label
self.atom[i] = LabeledAtom.fromAtom(self.atom[i], i+istart)
def setLabels(self, labels):
# change the labels on the LabeledAtoms
natom = self.natom()
if len(labels) != natom:
# this is not allowed; make no changes
print('Expected {:d} but received {:d} labels in LabeledGeometry.setLabels()'.format(natom, len(labels)))
return
else:
# change the labels
for i in range(natom):
self.atom[i].setLabel(labels[i])
return
def fromGeometry(geom, labels=''):
# create from unlabeled Geometry
Lmolec = LabeledGeometry(geom.atom, intype='atlist', labels=labels, units=geom.units)
return Lmolec
def getLabels(self):
# return the atom labels as a list
labels = [a.label for a in self.atom]
return labels
##
def atomic_weight(iz):
# return atomic weight given Z (3/21/2012) or elemental symbol (9/16/2014)
# values are from the NIST 2003 periodic table
# units are u (amu)
wt = [ 0, 1.00794, 4.002602, 6.941, 9.012182, 10.811, 12.0107, 14.0067, 15.9994, 18.9984032, 20.1797,
22.989770, 24.3050, 26.981538, 28.0855, 30.973761, 32.076, 35.453, 39.948,
39.0983, 40.078, 44.955910, 47.867, 50.9415, 51.9961, 54.938049, 55.845, 58.933200, 58.6934,
63.546, 65.409, 69.723, 72.64, 74.92160, 78.96, 79.904, 83.798,
85.4678, 87.62, 88.90585, 91.224, 92.90638, 95.94, 98, 101.07, 102.90550, 106.42,
107.8682, 112.411, 114.818, 118.710, 121.760, 127.60, 126.90447, 131.293,
132.90545, 137.327,
138.9055, 140.116, 140.90765, 144.24, 145, 150.36, 151.964, 157.25, 158.92534,
162.500, 164.93032, 167.259, 168.93421, 173.04, 174.967,
178.49, 180.9479, 183.84, 186.207, 190.23, 192.217, 195.078,
196.96655, 200.59, 204.3833, 207.2, 208.98038, 209, 210, 222,
223, 226,
227, 232.0381, 231.03588, 238.02891, 237, 244, 243, 247, 247,
251, 252, 257, 258, 259, 262,
261, 262, 266, 264, 277, 268 ]
if type( iz ) == int:
return wt[iz]
else:
# probably an elemental symbol
z = elz(iz)
return wt[z]
##
def xyz2Atom(atno, xyz):
# given Z value (or element symbol) and list [x, y, z], return an Atom
if type(atno) == int:
el = elz(atno)
else:
# we were probably given an element symbol, not an atomic number
el = atno
atno = elz(el)
m = atomic_weight(atno)
return Atom(el, xyz[0], xyz[1], xyz[2], m)
##
def xyz2Geometry(atnos, xyzs, units='angstrom'):
# args: list of atomic numbers; list of coordinates [x1, y1, z1, x2, y2, z2,...]
# return a Geometry
# 9/16/2014
#
# check for compatible list lengths
natom = len(atnos)
nxyz = len(xyzs)
if nxyz != 3 * natom:
print('Incompatible numbers of atoms and of coordinates:')
print('natom = {:d}, nxyz = {:d} in xyz2Geometry()'.format(natom, nxyz))
return None
# build Geometry one Atom at a time
molecule = Geometry(units=units)
for i in range(natom):
atno = atnos[i]
xyz = xyzs[3*i:3*i+3]
atom = xyz2Atom(atno, xyz)
molecule.addatom(atom)
return molecule
##
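# Minimal usage sketch (illustrative):
#   co = xyz2Geometry([6, 8], [0., 0., 0., 0., 0., 1.13])  # CO at 1.13 angstrom
#   co.natom()   # -> 2
##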
def JSdm(P, Q, base=4):
# Jensen-Shannon divergence metric; base=4 gives range = [0, 1]
# P and Q are *discrete* PDFs (with same data type)
# Allowed data types: tuple; list; dict; 1D numpy array
# P and Q must be same length, except when dict
# They will be L1-normalized here
# Return:
# (1) metric (float)
# (2) messages (list of string)
#
message = []
if type(P) != type(Q):
print('*** P and Q must be same data type in routine JSdm() ***')
return (None, None)
if (type(P) == list) or (type(P) == tuple) or (type(P) == np.ndarray):
P = np.array(P).astype(float)
Q = np.array(Q).astype(float)
allkeys = [] # length will be tested later, to infer input type
elif type(P) == dict:
# make a sorted list of all the keys
allkeys = sorted(set(list(P.keys()) + list(Q.keys())))
Plist = []
Qlist = []
for key in allkeys:
try:
Plist.append(P[key])
            except KeyError:
# probably key is not present in this dict
Plist.append(0)
try:
Qlist.append(Q[key])
            except KeyError:
Qlist.append(0)
if P.keys() != Q.keys():
message.append('Different key lists merged for P and Q')
# convert list to numpy array
P = np.array(Plist).astype(float)
Q = np.array(Qlist).astype(float)
else:
print('*** Unhandled data type in routine JSdm():', type(P))
return (None, None)
# No negative values are allowed
if len(np.where(P < 0)[0]) or len(np.where(Q < 0)[0]):
print('*** Negative values not allowed in routine JSdm() ***')
return (None, None)
# P and Q must have the same number of elements
if len(P) != len(Q):
print('*** P and Q must have same length in routine JSdm() ***')
return (None, None)
# Normalize both PDFs (L1-normalization)
Plen = P.sum()
Qlen = Q.sum()
if (Plen == 0) or (Qlen == 0):
print('*** P and Q may not be all zeros in routine JSdm() ***')
return (None, None)
P /= Plen
Q /= Qlen
pqsum = P + Q
# find any zeros in (P+Q) and delete corresponding elements in P, Q, and P+Q
nullidx = np.where(pqsum == 0)[0]
    if len(nullidx) > 0:
# delete the troublesome elements
if len(allkeys) > 0:
# input was dict
message.append('Deleted null elements with indices ' + str([allkeys[i] for i in nullidx]))
else:
# input was list-like
message.append('Deleted null elements with indices ' + str(nullidx))
P = np.delete(P, nullidx)
Q = np.delete(Q, nullidx)
pqsum = np.delete(pqsum, nullidx)
# compute the JSDM
# P or Q may still contain zeros, so don't take straight logarithm
# instead, use x*ln(y) = ln(y**x) and convention 0**0 = 1
s1 = 2 * P / pqsum
s2 = 2 * Q / pqsum
s1 = s1 ** P
s2 = s2 ** Q
s1 = np.log(s1) / np.log(base)
s2 = np.log(s2) / np.log(base)
dsq = (s1 + s2).sum()
return np.sqrt(dsq), message
##
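# Worked example (illustrative): JSdm() merges differing dict keys and
# zero-fills the missing entries before L1-normalization.
#   d, msgs = JSdm({'a': 3, 'b': 1}, {'a': 2, 'c': 2})
#   # 0 <= d <= 1 with the default base=4; identical PDFs give d = 0
##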
def AOpopdiffmats(df1, df2):
# Compare two pandas DataFrames with Mulliken population data,
# as returned by routine 'read_AOpop_in_MOs()' in 'g09_subs.py'
# Return two numpy 2D-arrays:
# (1) JSdm() differences in AO populations (Jensen-Shannon divergence metric)
# (2) (E2-E1) orbital energy differences
# Also return two lists of MO numbers:
# (3) MO labels in df1 (rows of matrices)
    # (4) MO labels in df2 (columns of matrices)
MOlist1 = sorted(set(df1.MO))
MOlist2 = sorted(set(df2.MO))
nmo1 = len(MOlist1)
nmo2 = len(MOlist2)
dPmat = np.zeros((nmo1, nmo2))
dEmat = np.zeros((nmo1, nmo2))
for imo in MOlist1:
# looping over MOs in first set
idx = MOlist1.index(imo) # row number in returned matrices
orb1 = df1[df1.MO == imo]
E1 = orb1.iloc[0]['Energy']
# convert AO populations into a dict
mulpop1 = {}
# create a label for each AO that looks like '#5-p' for a p-orbital on atom #5
for ao in orb1.index:
s = '#{:d}-{:s}'.format(orb1.loc[ao]['Atom#'], orb1.loc[ao]['L'])
c = orb1.loc[ao]['Contrib']
if c < 0:
# treat negative AO pop as a new variable (by changing its label)
s += '-neg'
c = abs(c)
mulpop1[s] = c
# loop over orbitals in second set
for jmo in MOlist2:
jdx = MOlist2.index(jmo) # column number in returned matrices
orb2 = df2[df2.MO == jmo]
E2 = orb2.iloc[0]['Energy']
dEmat[idx, jdx] = E2 - E1 # signed difference
# construct dict of AO populations as above
mulpop2 = {}
for ao in orb2.index:
s = '#{:d}-{:s}'.format(orb2.loc[ao]['Atom#'], orb2.loc[ao]['L'])
c = orb2.loc[ao]['Contrib']
if c < 0:
# negative AO pop
s += '-neg'
c = abs(c)
mulpop2[s] = c
# get JSdm distance between the two AO population vectors
dist = JSdm(mulpop1, mulpop2)
dPmat[idx, jdx] = dist[0]
return dPmat, dEmat, MOlist1, MOlist2
##
def orbitalPopMatch(df1, df2, Eweight=0.1, diagBias=0.001):
# Find which MOs correspond between two calculations.
# Note: Cannot distinguish degenerate orbitals!
# Compare two pandas DataFrames with Mulliken population data,
# as returned by routine 'read_AOpop_in_MOs()' in 'g09_subs.py'
# Argument 'Eweight' is the weight to give to energy differences.
# Argument 'diagBias' is the preference to give to the existing
# orbital numbering.
# Return a dict of MO number correspondences. The dict only includes
# orbitals that appear to be mismatched.
# Keys are MO labels in df2, values are MO labels in df1.
# Do not mix alpha with beta orbitals.
#
momap = {}
    if (df1['Spin'] == 'alpha').any() and (df1['Spin'] == 'beta').any():
# this is a UHF case; keep alpha and beta orbitals separate
for sp in ['alpha', 'beta']:
set1 = df1[df1['Spin'] == sp]
set2 = df2[df2['Spin'] == sp]
momap.update(orbitalPopMatch(set1, set2, Eweight=Eweight, diagBias=diagBias))
return momap
# simple, single-spin case
dPmat, dEmat, MOs1, MOs2 = AOpopdiffmats(df1, df2)
# count the MOs in each orbital set
norb1 = len(MOs1)
norb2 = len(MOs2)
nmo = min(norb1, norb2)
# use unsigned energy differences
diffmat = dPmat + Eweight * np.fabs(dEmat)
    # install the bias toward preserving the existing numbering
# Note: Gaussian prints the populations only to 0.01 precision
for i in range(norb1):
imo = MOs1[i]
try:
j = MOs2.index(imo)
diffmat[i, j] -= diagBias
        except ValueError:
# probably an orbital missing from set 2
pass
# find closest distance for each row
rowmin = diffmat.min(axis=1)
# sort by increasing distance (i.e., best matches first)
rowlist = rowmin.argsort()
# truncate to smallest dimension
rowlist = rowlist[0 : nmo]
claimed = [] # list of orbitals in set2 as they are paired
pairing = {} # mapping between orbital indices (not MO numbers/labels)
for iorb in rowlist:
# loop over matrix rows, starting with those with best available matches
for jorb in diffmat[iorb, :].argsort():
# loop over columns, starting with best match
if jorb in claimed:
# this orbital already paired
continue
# this is a pairing
claimed.append(jorb)
pairing[iorb] = jorb
break # done with this first-set MO
# convert into a mapping of MO numbers
for i in pairing.keys():
imo = MOs1[i] # MO number from first set
j = pairing[i]
jmo = MOs2[j] # MO number from second set
if imo != jmo:
# report only non-identity mappings
momap[jmo] = imo # key is the MO number in the 2nd set
return momap
##
def relabelOrbitals(df, momap):
# re-label MOs based upon a mapping provided by 'orbitalPopMatch()'
# Return value: the DataFrame with orbitals re-labeled
#
# loop once through the rows, changing MO labels
for idx in df.index:
imo = df.loc[idx, 'MO']
if imo in momap.keys():
# change this MO label
df.loc[idx, 'MO'] = momap[imo]
return df
##
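# Typical workflow sketch (illustrative; df1 and df2 come from
# read_AOpop_in_MOs() in g09_subs.py):
#   momap = orbitalPopMatch(df1, df2)    # e.g. {17: 16, 16: 17} for a swapped pair
#   df2 = relabelOrbitals(df2, momap)    # renumber df2's MOs to match df1
##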
def readXmol(fh, units='angstrom', handle=False):
# Read an XYZ file (handle) and return (Geometry object, #atoms, comment)
# if 'handle' is True, expect a file handle instead of a file name
# Return a three-tuple
if not handle:
fh = open(fh, 'r')
try:
natom = int( fh.readline() )
comment = fh.readline().rstrip()
df = pd.read_csv(fh, names=['El', 'X', 'Y', 'Z'], delim_whitespace=True)
# check the number of atoms
if natom != df.shape[0]:
print('Expected {:d} atoms but found {:d}!'.format(natom, df.shape[0]))
return None
except:
print('Unable to read XMol file')
return None
if not handle:
fh.close()
return Geometry(df, intype='DataFrame', units=units), natom, comment
##
def r0_ref( elem1, elem2 ):
# return single-bonded distances between elements (Angstrom)
# from b3lyp/6-31g* calculations on molecules specified (3/2/10)
# added covalent radii 3/21/2012
if ( elem1 > elem2 ):
# put the elements in ascending lexical order
t = elem1
elem1 = elem2
elem2 = t
if elem1 == 'C':
if elem2 == 'C':
# C-C bond from C2H6
return 1.5306
if elem2 == 'H':
# C-H bond from CH4
return 1.0936
if elem2 == 'N':
# C-N bond from CH3NH2
return 1.4658
if elem2 == 'O':
# C-O bond from CH3OH
return 1.4192
if elem1 == 'H':
if elem2 == 'H':
# H-H bond from H2
return 0.743
if elem2 == 'N':
# N-H bond from CH3NH2
return 1.0189
if elem2 == 'O':
# O-H bond from CH3OH
return 0.9691
if elem1 == 'N':
if elem2 == 'N':
# N-N bond from N2H4
return 1.4374
if elem2 == 'O':
# N-O bond from NH2OH
return 1.4481
if elem1 == 'O':
if elem2 == 'O':
# O-O bond from HOOH
return 1.456
# unknown case; estimate from rough covalent radii
z1 = elz( elem1 )
z2 = elz( elem2 )
r1 = atomic_radius( z1 )
r2 = atomic_radius( z2 )
rsum = r1 + r2
return rsum
##
def atomic_radius( iz ):
# return covalent atomic radius given Z (3/21/2012) (Angstrom)
# values are from Wikipedia (attributed to Slater 1964);
# I filled blanks with a guess (e.g., Z-1 value)
r = [ 0, 0.25, 0.25, 1.45, 1.05, 0.85, 0.70, 0.65, 0.60, 0.50, 0.50,
1.80, 1.50, 1.25, 1.10, 1.00, 1.00, 1.00, 1.00,
2.20, 1.80, 1.60, 1.40, 1.35, 1.40, 1.40, 1.40, 1.35, 1.35,
1.35, 1.35, 1.30, 1.25, 1.15, 1.15, 1.15, 1.15,
2.35, 2.00, 1.80, 1.55, 1.45, 1.45, 1.35, 1.30, 1.35, 1.40,
1.60, 1.55, 1.55, 1.45, 1.45, 1.40, 1.40, 1.40,
2.60, 2.15,
1.95, 1.85, 1.85, 1.85, 1.85, 1.85, 1.85, 1.80, 1.75,
1.75, 1.75, 1.75, 1.75, 1.75, 1.75,
1.55, 1.45, 1.35, 1.35, 1.30, 1.35, 1.35,
1.35, 1.50, 1.90, 1.80, 1.60, 1.90, 1.90, 1.90,
2.80, 2.15,
1.95, 1.80, 1.80, 1.75, 1.75, 1.75, 1.75, 1.75, 1.75,
1.75, 1.75, 1.75, 1.75, 1.75, 1.75,
1.75, 1.75, 1.75, 1.75, 1.75, 1.75 ]
if type(iz) == int:
return r[ iz ]
else:
# convert symbol to nuclear charge
z = elz( iz )
return r[z]
##
def vdW_radius(iz):
# return van der Waals radius given Z (11/20/20) (Angstrom)
# values are from Wikipedia; only up to Ra (Z=88)
# missing values filled by linear interpolation
# for Z>88 just use 1.86 (value given for uranium)
r = [0, 1.10, 1.40,
1.82, 1.53, 1.92, 1.70, 1.55, 1.52, 1.47, 1.54,
2.27, 1.73, 1.84, 2.10, 1.80, 1.80, 1.75, 1.88,
2.75, 2.31, 0, 0, 0, 0, 0, 0, 0, 1.63, 1.40, 1.39,
1.87, 2.11, 1.85, 1.90, 1.85, 2.02,
3.03, 2.49, 0, 0, 0, 0, 0, 0, 0, 1.63, 1.72, 1.58,
1.93, 2.17, 2.06, 2.06, 1.98, 2.16,
3.43, 2.68] + [0] * 21 + [1.75, 1.66, 1.55,
1.96, 2.02, 2.07, 1.97, 2.02, 2.20,
3.48, 2.83]
if type(iz) != int:
# convert symbol to Z
iz = elz(iz)
if iz > 88:
vdw = 1.86 # a guess
else:
vdw = r[iz]
if vdw == 0:
# interpolate
prev = iz-1
post = iz+1
while r[prev] == 0:
prev -= 1
while r[post] == 0:
post += 1
dz = post - prev
delta = (r[post] - r[prev]) / dz
vdw = r[prev] + (iz - prev) * delta
# round to nearest 0.1 pm
return round(vdw, 3)
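##
# Worked example of the interpolation above (illustrative): vdW_radius(21)
# finds zeros for Z = 21..27, so it interpolates between Z=20 (2.31) and
# Z=28 (1.63): 2.31 + 1 * (1.63 - 2.31) / 8 = 2.225 angstrom.
##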
def from_ltriangle(vec):
# given a 1D numpy array that is a flattened lower-triangle,
# return the corresponding symmetric, square numpy array
n = len(vec)
dim = int(round(0.5 * (-1 + np.sqrt(1+8*n)))) # dimension of the square matrix
idx = np.tril_indices(dim)
mat = np.zeros((dim, dim))
mat[idx] = vec
# symmetrize
mat = mat + np.triu(mat.T, 1)
return mat
##
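# Worked example (illustrative): a 6-element vector implies a 3x3 symmetric matrix.
#   from_ltriangle(np.array([1., 2., 3., 4., 5., 6.]))
#   # -> [[1., 2., 4.],
#   #     [2., 3., 5.],
#   #     [4., 5., 6.]]
##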
def inertia_tensor(masses, xyz):
# moment-of-inertia tensor of point-masses
# m is a list of masses, xyz is a numpy array of Cartesian triples
inertia = np.zeros((3,3))
n = len(masses)
if n != len(xyz):
print('Argument inconsistency in inertia_tensor(): {:d} masses but {:d} positions'.format(n, len(xyz)))
return None
for i in range(n):
m = masses[i]
(x, y, z) = tuple(xyz[i])
inertia[0][0] += m * (y*y + z*z)
inertia[1][1] += m * (x*x + z*z)
inertia[2][2] += m * (y*y + x*x)
inertia[0][1] -= m * x * y
inertia[0][2] -= m * x * z
inertia[1][2] -= m * y * z
inertia[1][0] = inertia[0][1]
inertia[2][0] = inertia[0][2]
inertia[2][1] = inertia[1][2]
return inertia
##
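# Quick sanity check (illustrative): two unit masses at +/-1 on the x axis give
# Ixx = 0 and Iyy = Izz = 2 with zero off-diagonal elements:
#   inertia_tensor([1., 1.], np.array([[1., 0., 0.], [-1., 0., 0.]]))
##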
def orthogonalize_rows(M, norm=0):
# orthogonalize rows of numpy 2D array M
# normalize each row to length 'norm' if norm > 0
for i in range(M.shape[0]-1):
# project row 'i' from all later rows
v = M[i] / np.linalg.norm(M[i])
for j in range(i+1, M.shape[0]):
p = np.dot(v, M[j])
M[j] -= p * v
    if norm > 0:
        # normalize each row to the specified length
        nrm = np.linalg.norm(M, axis=1)
        M = np.divide(M.T, nrm).T * norm
return M
##
def vib_harmonic(fc, mass, sayvetz=False, xyz=[]):
# given numpy arrays of cartesian force constants and atomic masses,
# return harmonic frequencies (cm^-1) and mode vectors
# This function does not do Sayvetz projection unless requested
# the projection requires atomic coordinates (as flattened list)
    # Following Ochterski's description
mwt = [] # mass-weighting vector
for m in mass:
mwt.extend( [1/np.sqrt(m)] * 3 ) # same mass for (x,y,z) of an atom
wmat = np.outer(mwt, mwt) # mass-weighting matrix
# apply the mass-weighting matrix to the force constants
wfc = np.multiply(fc, wmat)
wfc /= AMU2AU # mass-weighted force constant matrix in atomic units
eigval, eigvec = np.linalg.eigh(wfc)
esign = np.sign(eigval) # save the sign of each eigenvalue
eigval = np.fabs(eigval) # all values are now positive
eigval = np.sqrt(eigval)
eigval = np.multiply(esign, eigval) # imaginary frequencies are "negative"
eigval *= AU2CM
if not sayvetz:
# no projections; return eigenvectors as rows
return eigval, eigvec.T
else:
# Use Sayvetz conditions to project out external coordinates
print('WARNING: SAYVETZ PROJECTION IS NOT WORKING!')
natom = len(mass)
dimen = 3 * natom
if len(xyz) != dimen:
            print('Unable to do Sayvetz projection: {:d} masses but {:d} coordinates'.format(natom, len(xyz)))
return eigval, eigvec.T
# project out the translations and rotations
xyz = xyz.reshape(-1, 3) # each row of 'xyz' is now for one atom
com = np.zeros(3) # center of mass
mtot = 0 # total mass
for i in range(natom):
mtot += mass[i]
com += mass[i] * xyz[i]
com /= mtot
print('total mass = {:.3f}'.format(mtot))
print('center of mass:', com)
# translate COM to the origin
for i in range(natom):
xyz[i] -= com
# get principal axes
inert = inertia_tensor(mass, xyz)
print('inertial tensor:\n', inert)
inert_val, inert_vec = np.linalg.eigh(inert)
print('inert_val:', inert_val)
print('inert_vec:\n', inert_vec)
# translation S vectors (called D1, D2, D3 by Ochterski)
for i in range(natom):
mat = np.eye(3) * np.sqrt(mass[i])
try:
S = np.concatenate((S, mat), axis=1)
            except NameError:
# probably haven't created S yet
S = mat.copy()
# rotation S vectors (Ochterski's D4, D5, D6)
if False:
# following Ochterski
print('*** Following Ochterski\'s white paper')
for n in range(natom):
mat = np.zeros((3,3))
for i in [0, 1, 2]:
j = (i+1) % 3
k = (j+1) % 3
mat[i] = np.dot(xyz[n], inert_vec[j]) * inert_vec[k]
mat[i] -= np.dot(xyz[n], inert_vec[k]) * inert_vec[j]
mat[i] /= np.sqrt(mass[n])
try:
Sr = np.concatenate((Sr, mat), axis=1)
                except NameError:
# probably haven't created Sr yet
Sr = mat.copy()
S = np.concatenate((S, Sr), axis=0)
else:
# following G03 source code: routine TRVect() in utilnz.F
print('*** Following G03 source code')
for n in range(natom):
mat = np.zeros((3,3))
CP = np.dot(inert_vec, xyz[n])
mat[0,0] = CP[1]*inert_vec[2,0] - CP[2]*inert_vec[1,0]
mat[0,1] = CP[1]*inert_vec[2,1] - CP[2]*inert_vec[1,1]
mat[0,2] = CP[1]*inert_vec[2,2] - CP[2]*inert_vec[1,2]
mat[1,0] = CP[2]*inert_vec[0,0] - CP[0]*inert_vec[2,0]
mat[1,1] = CP[2]*inert_vec[0,1] - CP[0]*inert_vec[2,1]
mat[1,2] = CP[2]*inert_vec[0,2] - CP[0]*inert_vec[2,2]
mat[2,0] = CP[0]*inert_vec[1,0] - CP[1]*inert_vec[0,0]
mat[2,1] = CP[0]*inert_vec[1,1] - CP[1]*inert_vec[0,1]
mat[2,2] = CP[0]*inert_vec[1,2] - CP[1]*inert_vec[0,2]
mat *= np.sqrt(mass[n])
try:
Sr = np.concatenate((Sr, mat), axis=1)
                except NameError:
# probably haven't created Sr yet
Sr = mat.copy()
S = np.concatenate((S, Sr), axis=0)
print('combined S:\n', S)
# remove any zero-vector rows
nrm = np.linalg.norm(S, axis=1)
print('nrm(S) =', nrm)
for i in range(5, -1, -1):
# loop over rows of S
if nrm[i] < 1.0e-03: # I picked this threshold arbitrarily!
S = np.delete(S, (i), axis=0)
print('*** deleting row {:d} of S ***'.format(i))
else:
S[i] /= nrm[i] # normalize the row
# orthogonalize rows and re-normalize (only needed when following Ochterski)
S = orthogonalize_rows(S, norm=1)
print('normalized S:\n', S)
print('S dot S:\n', np.dot(S, S.T))
# Start from a mass-weighted unit matrix and project out the rows of S
# also project out previous rows of growing D matrix
D = np.eye(dimen, dimen) # initialize D to the identity matrix
for n in range(natom):
for i in range(3*n, 3*n+3):
# apply mass-weighting
D[i] *= np.sqrt(mass[n])
print('D before any projection:\n', D)
for i in range(S.shape[0]):
# project out each row of S from D
p = np.dot(S[i], D.T)
D -= np.outer(p, S[i])
nrm = np.linalg.norm(D, axis=1)
print('D after projecting out S:\n', D)
# now orthogonalize the remaining basis vectors
D = orthogonalize_rows(D, norm=0) # do not renormalize after orthogonalization
print('D after orthogonalization:\n', D)
nrm = np.linalg.norm(D, axis=1)
print('norm of D rows:\n', nrm)
# Delete the zero rows
zrow = np.where(nrm < 0.001)[0] # I picked this threshold arbitrarily!
zrow = tuple(zrow) # convert to tuple
print('zrow =', zrow)
if len(zrow) != S.shape[0]:
# something is wrong
print('*** Error: There are {:d} external coordinates but {:d} have been eliminated ***'.format(S.shape[0], len(zrow)))
print('...continuing anyway!...')
D = np.delete(D, zrow, axis=0)
# re-normalize the rows of D
nrm = np.linalg.norm(D, axis=1)
print('shape of D =', D.shape)
print('norm of D rows:\n', nrm)
D = np.divide(D.T, nrm).T
print('D after normalization:\n', D)
# adjoin S to D
D = np.concatenate((D, S), axis=0)
print('new shape of D =', D.shape)
nrm = np.linalg.norm(D, axis=1)
print('norm of D rows:\n', nrm)
# change basis for force constants
fcint = np.dot(D, np.dot(fc, D.T))
print('internal-coordinate force constants:\n', fcint)
print('Frequencies before projection:\n', eigval)
igval, igvec = np.linalg.eigh(fcint)
esign = np.sign(igval) # save the sign of each eigenvalue
igval = np.fabs(igval) # all values are now positive
igval = np.sqrt(igval)
igval = np.multiply(esign, igval) # imaginary frequencies are "negative"
igval *= AU2CM
print('Frequencies after projection:\n', igval)
print('Ratios:\n', np.divide(igval, eigval))
return eigval, eigvec.T
##
def filename_root(filename):
# remove any file suffix
m = re.match(r'(.+)\.\w+$', filename)
if m:
return m.group(1)
else:
# no suffix
return filename
##
def rotation_mat_angle(v, a, unit='radian'):
    # return a matrix that rotates by angle a around axis v
# method is from StackExchange.com
if unit == 'degree':
# convert to radians for trig functions
a = np.deg2rad(a)
# normalize vector
u = v / np.linalg.norm(v)
[x, y, z] = u.tolist()
s = np.sin(a)
s2 = np.sin(a/2)
W = np.array([ [0.,-z,y], [z,0.,-x], [-y,x,0.] ])
R = np.identity(3) + s*W + 2*s2*s2*np.dot(W,W)
return R
##
def rotation_mat_align(A, B, scale=False):
# given two numpy vectors (in R3), return the matrix that rotates A into B
# method is from StackExchange.com
# if scale is True, then also scale the magnitude to match
if (len(A) != 3) or (len(B) != 3):
print('**** must be vectors in R3! ****')
return np.zeros((3,3))
# normalize
a = A / np.linalg.norm(A)
b = B / np.linalg.norm(B)
c = np.dot(a, b) # angle cosine
if np.isclose(c, 1.):
# no rotation needed
R = np.identity(3)
elif np.isclose(c, -1.):
# antiparallel; rotate by pi about a perpendicular axis
        # np.cross(a, 1. - a) equals np.cross(a, ones); it fails only if a is parallel to (1, 1, 1)
        p = np.cross(a, 1. - a)
R = rotation_mat_angle(p, PI)
else:
# general case
v = np.cross(a, b)
[v1, v2, v3] = v.tolist()
vx = np.array([ [0.,-v3,v2], [v3,0.,-v1], [-v2,v1,0] ])
R = np.identity(3) + vx + np.dot(vx,vx)/(1+c)
if scale:
s = np.linalg.norm(B) / np.linalg.norm(A) # scaling factor
R *= s
return R
##
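# Example (illustrative): rotate the x axis onto the y axis.
#   R = rotation_mat_align(np.array([1., 0., 0.]), np.array([0., 1., 0.]))
#   np.dot(R, [1., 0., 0.])   # -> [0., 1., 0.] to within roundoff
##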
def normalize(v, length=1.0):
# given a vector, return it scaled to desired length
try:
n = np.linalg.norm(v)
if n == 0:
return np.zeros_like(v)
else:
return np.array(v) * length / n
except:
print('*** failure computing length in normalize()')
print('typeof(v) = ', type(v))
print('v = ', v)
sys.exit(1)
##
def to_radian(angle, reverse=False):
# given an angle in degrees, convert it to radians (or the reverse)
if reverse:
# convert from radians to degrees
return angle * 180. / PI
else:
# convert from degrees to radians
return angle * PI / 180.
##
def angular_momentum(m, r, v):
# given atomic masses, positions, and velocities,
# return the total angular momentum
rxv = np.cross(r,v)
L = (rxv.T * m).T.sum(axis=0)
return L
##
def angle_canon(a, unit='radian'):
# given an angle (or numpy array of them), return the equivalent
# value in the interval (-pi, pi]
if unit == 'degree':
c = (-a + 180.) % 360. - 180.
else:
c = (-a + PI) % (2 * PI) - PI
return -c
##
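# Examples (illustrative):
#   angle_canon(370., unit='degree')    # -> 10.0
#   angle_canon(-180., unit='degree')   # -> 180.0 (the interval is (-180, 180])
##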
def in_bounds(x, target, tolerance):
# is 'x' in the range 'target' +- 'tolerance' ?
tolerance = np.abs(tolerance)
return ( (x < target+tolerance) and (x > target-tolerance) )
##
def smoothing(x, y, x2, style='gau', width=-1, normalize=True):
# return smoothed y values for (x,y) data series (numpy arrays)
    # output is over the smoothed range defined by x2 (a numpy array)
# no sorting necessary
# styles: 'exp' for exponential; 'gau' for gaussian
# width parameter (sigma) defaults to 1% of x-range
if len(x) != len(y):
# bad input data
return None
xlo = min(x)
xhi = max(x)
if (width <= 0):
width = (xhi - xlo) * 0.01
y2 = np.zeros_like(x2)
for i in range(len(y)):
dx = x2 - x[i]
if style == 'gau':
dx = (dx/width)**2
t = np.exp(-dx)
if style == 'exp':
dx = abs(dx/width)
t = np.exp(-dx)
if normalize:
t = t / t.sum()
y2 = y2 + t * y[i]
return y2
##
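# Usage sketch (illustrative): Gaussian-broaden a stick spectrum onto a grid.
#   x = np.array([1.0, 2.0]); y = np.array([1.0, 0.5])
#   grid = np.linspace(0., 3., 301)
#   broadened = smoothing(x, y, grid, style='gau', width=0.05)
##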
def joinGeometries(Glist):
# Given a list of Geometry objects, return a single Geometry
# that includes all their atoms
# if charges are specified, sum them
atomlist = []
q = 0
for G in Glist:
atomlist += G.atom
try:
q += G.charge
        except (AttributeError, TypeError):
q = None
Gtot = Geometry(atomlist, intype='atlist')
Gtot.charge = q
return Gtot
##
def same_connectivity(Struct1, Struct2, tol=1.3):
# compare connectivity tables
# return True if same, else False
conn1 = Struct1.connection_table(tol)
conn2 = Struct2.connection_table(tol)
return np.array_equal(conn1, conn2)
##
def min_RMSD(Geom, refGeom, use_masses=False, inplace=False):
# align Geom to refGeom and return the final RMSD
G = RMSD_align(Geom, refGeom, use_masses=use_masses)
if inplace:
Geom.copyxyz(G)
return RMSD(G, refGeom)
##
def RMSD_align(Geom, refGeom, use_masses=False):
# translate and rotate Geometry object 'Geom' to minimize RMSD with 'refGeom'
# return a new Geometry object
G = Geom.copy() # avoid damaging the input geometries
refG = refGeom.copy()
if not use_masses:
# Use unit mass for every atom
mvec = np.ones(G.natom())
G.set_masses(mvec)
refG.set_masses(mvec)
transl = refG.COM()
#print('::: initial RMSD = ', RMSD(G, refG), end='')
G.center(use_masses=use_masses)
refG.center(use_masses=use_masses)
U = Kabsch(G, refG, use_masses=use_masses)
G.rotate(U)
#print(' after align = ', RMSD(G, refG))
G.translate(transl)
return G
##
def RMSD(Geom1, Geom2):
# return the RMSD between two Geometry objects (no weights)
v1 = Geom1.toVector().reshape((-1, 3))
v2 = Geom2.toVector().reshape((-1, 3))
if v1.shape != v2.shape:
        print_err('', 'Inconsistent atom counts: {:d} for Geom1 and {:d} for Geom2'.format(Geom1.natom(), Geom2.natom()))
d = np.array([distance(v1[i], v2[i]) for i in range(v1.shape[0])])
dsq = d**2
rmsd = np.sqrt(dsq.mean())
return rmsd
##
def Kabsch(Geom1, Geom2, use_masses=False):
# return the rotation matrix that mimizes the unweighted RMSD (Wikipedia: "Kabsch algorithm")
# (tranform G1 toward G2)
G1 = Geom1.copy() # avoid damaging the input Geometry objects
G2 = Geom2.copy()
natom = G1.natom()
if natom != G2.natom():
print_err('', 'Inconsistent atom counts: {:d} for Geom1 and {:d} for Geom2'.format(natom, G2.natom()))
# translate barycenters to origin
if not use_masses:
# Use unit mass for every atom
mvec = np.ones(natom)
G1.set_masses(mvec)
G2.set_masses(mvec)
G1.center(use_masses=use_masses)
G2.center(use_masses=use_masses)
elem, P = G2.separateXYZ() # the reference
elem, Q = G1.separateXYZ()
A = np.dot(P.T, Q)
V, s, W = np.linalg.svd(A)
d = np.sign(np.linalg.det(np.dot(V,W)))
D = np.diag([1., 1., d])
U = np.dot(V, np.dot(D,W))
return U
##
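# Usage sketch (illustrative; gA and gB are compatible Geometry objects):
#   rmsd0 = RMSD(gA, gB)                      # before alignment
#   rmsd1 = min_RMSD(gA, gB, inplace=True)    # gA is aligned onto gB in place
##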
def average_structure(Struct1, Struct2, weight1=0.5, weight2=0.5):
# given two compatible structures, return a similar structure
# with coordinates that are the weighted average of the
# input structures
if (Struct1.coordtype != Struct2.coordtype) or (Struct1.natom() != Struct2.natom()):
# structures are not compatible
return None
v1 = Struct1.toVector()
v2 = Struct2.toVector()
try:
v3 = (weight1 * v1 + weight2 * v2) / (weight1 + weight2)
except:
# probably weights sum to zero
return np.nan
Result = Struct1.copy()
unitS = Struct1.unitX()
Result.fromVector(v3, unitS)
return Result
##
def FGHcheck(x, y, count, acc=1.0e-6, abort=True):
# for Fourier Grid Hamiltonian calculations
# return True if arrays are OK, else False
npt = len(x)
if len(y) != npt:
if abort:
print_err('', 'x and y have different lengths')
else:
return False
if (count == 'odd'):
if (npt % 2 == 0):
if abort:
print_err('', 'number of points is even but should be odd')
else:
return False
elif (count == 'even'):
if (npt % 2 == 1):
if abort:
print_err('', 'number of points is odd but should be even')
else:
return False
else:
print_err('', "number of points must be 'even' or 'odd', not '{:s}' ".format(str(count)))
# check for uniform intervals
dx = np.ediff1d(x)
ddx = np.ediff1d(dx) / x.max()
    if not np.allclose(ddx, np.zeros_like(ddx), atol=acc):
        if abort:
            print_err('', 'grid points are not uniformly spaced')
        else:
            return False
    return True
import argparse
import colorsys
import math
import os
import random
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pyglet
import trimesh
from PIL import Image, ImageEnhance
from tqdm import tqdm
from OpenGL.GL import GL_LINEAR_MIPMAP_LINEAR
import pyrender
from archiver import Archiver, SceneData
from pyrender import (DirectionalLight, Mesh, Node, OffscreenRenderer,
PerspectiveCamera, PointLight, RenderFlags, Scene,
Primitive)
texture_directory = os.path.join(os.path.dirname(__file__), "..", "textures")
object_directory = os.path.join(os.path.dirname(__file__), "objects")
floor_textures = [
"{}/lg_floor_d.tga".format(texture_directory),
"{}/lg_style_01_floor_blue_d.tga".format(texture_directory),
"{}/lg_style_01_floor_orange_bright_d.tga".format(texture_directory),
]
wall_textures = [
"{}/lg_style_01_wall_cerise_d.tga".format(texture_directory),
"{}/lg_style_01_wall_green_bright_d.tga".format(texture_directory),
"{}/lg_style_01_wall_red_bright_d.tga".format(texture_directory),
"{}/lg_style_02_wall_yellow_d.tga".format(texture_directory),
"{}/lg_style_03_wall_orange_bright_d.tga".format(texture_directory),
]
objects = [
pyrender.objects.Capsule,
pyrender.objects.Cylinder,
pyrender.objects.Icosahedron,
pyrender.objects.Box,
pyrender.objects.Sphere,
]
def set_random_texture(node, path):
texture_image = Image.open(path).convert("RGB")
primitive = node.mesh.primitives[0]
assert isinstance(primitive, Primitive)
primitive.material.baseColorTexture.source = texture_image
primitive.material.baseColorTexture.sampler.minFilter = GL_LINEAR_MIPMAP_LINEAR
def build_scene(floor_textures, wall_textures, fix_light_position=False):
scene = Scene(
        bg_color=np.array([153 / 255, 226 / 255, 249 / 255]))
import tensorflow as tf
import numpy as np
import functools as ft
import env
import reward
import tensorflow_probability as tfp
import random
import agentsEnv as ag
import itertools as it
import pygame as pg
class ApproximatePolicy():
def __init__(self, actionSpace):
self.actionSpace = actionSpace
self.numActionSpace = len(self.actionSpace)
def __call__(self, stateBatch, model):
graph = model.graph
state_ = graph.get_tensor_by_name('inputs/state_:0')
actionDistribution_ = graph.get_tensor_by_name('outputs/actionDistribution_:0')
actionDistributionBatch = model.run(actionDistribution_, feed_dict = {state_ : stateBatch})
actionIndexBatch = [np.random.choice(range(self.numActionSpace), p = actionDistribution) for actionDistribution in actionDistributionBatch]
actionBatch = np.array([self.actionSpace[actionIndex] for actionIndex in actionIndexBatch])
return actionBatch
class SampleTrajectory():
def __init__(self, maxTimeStep, transitionFunction, isTerminal):
self.maxTimeStep = maxTimeStep
self.transitionFunction = transitionFunction
self.isTerminal = isTerminal
def __call__(self, actor):
        oldState, action = None, None
oldState = self.transitionFunction(oldState, action)
trajectory = []
for time in range(self.maxTimeStep):
oldStateBatch = oldState.reshape(1, -1)
actionBatch = actor(oldStateBatch)
action = actionBatch[0]
# actionBatch shape: batch * action Dimension; only keep action Dimention in shape
newState = self.transitionFunction(oldState, action)
trajectory.append((oldState, action))
terminal = self.isTerminal(oldState)
if terminal:
break
oldState = newState
return trajectory
class AccumulateReward():
def __init__(self, decay, rewardFunction):
self.decay = decay
self.rewardFunction = rewardFunction
def __call__(self, trajectory):
rewards = [self.rewardFunction(state, action) for state, action in trajectory]
accumulateReward = lambda accumulatedReward, reward: self.decay * accumulatedReward + reward
accumulatedRewards = np.array([ft.reduce(accumulateReward, reversed(rewards[TimeT: ])) for TimeT in range(len(rewards))])
return accumulatedRewards
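# Worked example (illustrative): AccumulateReward(0.5, rewardFn) applied to
# per-step rewards [1, 1, 1] yields returns [1.75, 1.5, 1.0], since each entry
# is reward + decay * (return of the following step).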
class TrainCriticMonteCarloTensorflow():
def __init__(self, accumulateReward):
self.accumulateReward = accumulateReward
def __call__(self, episode, criticModel):
mergedEpisode = np.concatenate(episode)
numBatch = len(mergedEpisode)
stateEpisode, actionEpisode = list(zip(*mergedEpisode))
stateBatch = np.array(stateEpisode).reshape(numBatch, -1)
mergedAccumulatedRewardsEpisode = np.concatenate([self.accumulateReward(trajectory) for trajectory in episode])
valueTargetBatch = np.array(mergedAccumulatedRewardsEpisode).reshape(numBatch, -1)
graph = criticModel.graph
state_ = graph.get_tensor_by_name('inputs/state_:0')
valueTarget_ = graph.get_tensor_by_name('inputs/valueTarget_:0')
loss_ = graph.get_tensor_by_name('outputs/loss_:0')
trainOpt_ = graph.get_operation_by_name('train/adamOpt_')
loss, trainOpt = criticModel.run([loss_, trainOpt_], feed_dict = {state_ : stateBatch,
valueTarget_ : valueTargetBatch
})
return loss, criticModel
def approximateValue(stateBatch, criticModel):
graph = criticModel.graph
state_ = graph.get_tensor_by_name('inputs/state_:0')
value_ = graph.get_tensor_by_name('outputs/value_/BiasAdd:0')
valueBatch = criticModel.run(value_, feed_dict = {state_ : stateBatch})
return valueBatch
class EstimateAdvantageMonteCarlo():
def __init__(self, accumulateReward):
self.accumulateReward = accumulateReward
def __call__(self, episode, critic):
mergedEpisode = np.concatenate(episode)
numBatch = len(mergedEpisode)
stateEpisode, actionEpisode = list(zip(*mergedEpisode))
stateBatch, actionBatch = np.array(stateEpisode).reshape(numBatch, -1), np.array(actionEpisode).reshape(numBatch, -1)
mergedAccumulatedRewardsEpisode = np.concatenate([self.accumulateReward(trajectory) for trajectory in episode])
accumulatedRewardsBatch = np.array(mergedAccumulatedRewardsEpisode).reshape(numBatch, -1)
advantageBatch = accumulatedRewardsBatch - critic(stateBatch)
advantages = np.concatenate(advantageBatch)
return advantages
class TrainActorMonteCarloTensorflow():
def __init__(self, actionSpace):
self.actionSpace = actionSpace
self.numActionSpace = len(actionSpace)
def __call__(self, episode, advantages, actorModel):
mergedEpisode = np.concatenate(episode)
numBatch = len(mergedEpisode)
stateEpisode, actionEpisode = list(zip(*mergedEpisode))
actionIndexEpisode = np.array([list(self.actionSpace).index(list(action)) for action in actionEpisode])
actionLabelEpisode = np.zeros([numBatch, self.numActionSpace])
actionLabelEpisode[np.arange(numBatch), actionIndexEpisode] = 1
stateBatch, actionLabelBatch = np.array(stateEpisode).reshape(numBatch, -1), np.array(actionLabelEpisode).reshape(numBatch, -1)
graph = actorModel.graph
state_ = graph.get_tensor_by_name('inputs/state_:0')
actionLabel_ = graph.get_tensor_by_name('inputs/actionLabel_:0')
advantages_ = graph.get_tensor_by_name('inputs/advantages_:0')
loss_ = graph.get_tensor_by_name('outputs/loss_:0')
trainOpt_ = graph.get_operation_by_name('train/adamOpt_')
loss, trainOpt = actorModel.run([loss_, trainOpt_], feed_dict = {state_ : stateBatch,
actionLabel_ : actionLabelBatch,
advantages_ : advantages
})
return loss, actorModel
class OfflineAdvantageActorCritic():
def __init__(self, numTrajectory, maxEpisode, render):
self.numTrajectory = numTrajectory
self.maxEpisode = maxEpisode
self.render = render
def __call__(self, actorModel, criticModel, approximatePolicy, sampleTrajectory, trainCritic, approximateValue, estimateAdvantage, trainActor):
for episodeIndex in range(self.maxEpisode):
actor = lambda state: approximatePolicy(state, actorModel)
episode = [sampleTrajectory(actor) for trajectoryIndex in range(self.numTrajectory)]
            valueLoss, criticModel = trainCritic(episode, criticModel)
critic = lambda state: approximateValue(state, criticModel)
advantages = estimateAdvantage(episode, critic)
policyLoss, actorModel = trainActor(episode, advantages, actorModel)
print(np.mean([len(trajectory) for trajectory in episode]))
            if episodeIndex % 1 == -1:  # always False: rendering is intentionally disabled here
for timeStep in episode[-1]:
self.render(timeStep[0])
return actorModel, criticModel
def main():
#tf.set_random_seed(123)
#np.random.seed(123)
actionSpace = [[10,0],[7,7],[0,10],[-7,7],[-10,0],[-7,-7],[0,-10],[7,-7]]
numActionSpace = len(actionSpace)
numStateSpace = 4
numActorFC1Unit = 50
numActorFC2Unit = 50
numActorFC3Unit = 50
numActorFC4Unit = 50
numCriticFC1Unit = 100
numCriticFC2Unit = 100
numCriticFC3Unit = 100
numCriticFC4Unit = 100
learningRateActor = 1e-4
learningRateCritic = 3e-4
actorGraph = tf.Graph()
with actorGraph.as_default():
with tf.name_scope("inputs"):
state_ = tf.placeholder(tf.float32, [None, numStateSpace], name="state_")
actionLabel_ = tf.placeholder(tf.int32, [None, numActionSpace], name="actionLabel_")
advantages_ = tf.placeholder(tf.float32, [None, ], name="advantages_")
with tf.name_scope("hidden"):
initWeight = tf.random_uniform_initializer(-0.03, 0.03)
initBias = tf.constant_initializer(0.01)
fullyConnected1_ = tf.layers.dense(inputs = state_, units = numActorFC1Unit, activation = tf.nn.relu, kernel_initializer = initWeight, bias_initializer = initBias )
fullyConnected2_ = tf.layers.dense(inputs = fullyConnected1_, units = numActorFC2Unit, activation = tf.nn.relu, kernel_initializer = initWeight, bias_initializer = initBias )
            fullyConnected3_ = tf.layers.dense(inputs = fullyConnected2_, units = numActorFC3Unit, activation = tf.nn.relu, kernel_initializer = initWeight, bias_initializer = initBias )
allActionActivation_ = tf.layers.dense(inputs = fullyConnected3_, units = numActionSpace, activation = None, kernel_initializer = initWeight, bias_initializer = initBias )
with tf.name_scope("outputs"):
actionDistribution_ = tf.nn.softmax(allActionActivation_, name = 'actionDistribution_')
actionEntropy_ = tf.multiply(tfp.distributions.Categorical(probs = actionDistribution_).entropy(), 1, name = 'actionEntropy_')
negLogProb_ = tf.nn.softmax_cross_entropy_with_logits_v2(logits = allActionActivation_, labels = actionLabel_, name = 'negLogProb_')
loss_ = tf.reduce_mean(tf.multiply(negLogProb_, advantages_), name = 'loss_')
actorLossSummary = tf.summary.scalar("ActorLoss", loss_)
with tf.name_scope("train"):
trainOpt_ = tf.train.AdamOptimizer(learningRateActor, name = 'adamOpt_').minimize(loss_)
actorInit = tf.global_variables_initializer()
actorModel = tf.Session(graph = actorGraph)
actorModel.run(actorInit)
criticGraph = tf.Graph()
with criticGraph.as_default():
with tf.name_scope("inputs"):
state_ = tf.placeholder(tf.float32, [None, numStateSpace], name="state_")
valueTarget_ = tf.placeholder(tf.float32, [None, 1], name="valueTarget_")
with tf.name_scope("hidden"):
initWeight = tf.random_uniform_initializer(-0.03, 0.03)
initBias = tf.constant_initializer(0.001)
            fullyConnected1_ = tf.layers.dense(inputs = state_, units = numCriticFC1Unit, activation = tf.nn.relu, kernel_initializer = initWeight, bias_initializer = initBias )
            fullyConnected2_ = tf.layers.dense(inputs = fullyConnected1_, units = numCriticFC2Unit, activation = tf.nn.relu, kernel_initializer = initWeight, bias_initializer = initBias )
            fullyConnected3_ = tf.layers.dense(inputs = fullyConnected2_, units = numCriticFC3Unit, activation = tf.nn.relu, kernel_initializer = initWeight, bias_initializer = initBias )
            fullyConnected4_ = tf.layers.dense(inputs = fullyConnected3_, units = numCriticFC4Unit, activation = tf.nn.relu, kernel_initializer = initWeight, bias_initializer = initBias )
with tf.name_scope("outputs"):
value_ = tf.layers.dense(inputs = fullyConnected4_, units = 1, activation = None, name = 'value_', kernel_initializer = initWeight, bias_initializer = initBias )
diff_ = tf.subtract(valueTarget_, value_, name = 'diff_')
loss_ = tf.reduce_mean(tf.square(diff_), name = 'loss_')
criticLossSummary = tf.summary.scalar("CriticLoss", loss_)
with tf.name_scope("train"):
trainOpt_ = tf.train.AdamOptimizer(learningRateCritic, name = 'adamOpt_').minimize(loss_)
criticInit = tf.global_variables_initializer()
criticModel = tf.Session(graph = criticGraph)
criticModel.run(criticInit)
xBoundary = [0, 360]
yBoundary = [0, 360]
checkBoundaryAndAdjust = ag.CheckBoundaryAndAdjust(xBoundary, yBoundary)
initSheepPosition = np.array([180, 180])
initWolfPosition = np.array([180, 180])
initSheepVelocity = np.array([0, 0])
initWolfVelocity = np.array([0, 0])
    initSheepPositionNoise = np.array([120, 120])
# SPDX-License-Identifier: Apache-2.0
"""Unit Tests for optimizers such as TransposeOptimizer."""
import unittest
import numpy as np
from onnx import helper, numpy_helper, TensorProto, OperatorSetIdProto
from parameterized import parameterized
from backend_test_base import Tf2OnnxBackendTestBase
from common import unittest_main, group_nodes_by_type, check_opset_min_version, check_opset_max_version, get_test_config
from tf2onnx import utils, constants
from tf2onnx.graph import GraphUtil
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test
class OptimizerTests(Tf2OnnxBackendTestBase):
"""Run original model proto and modified model proto with onnxruntime, compare the results."""
def run_and_compare(self, output_names_with_port, onnx_feed_dict, origin_proto, op_type,
remaining_op_num, debug=False, rtol=1e-07):
utils.make_sure(op_type is not None, "op_type should be specified")
utils.make_sure(remaining_op_num is not None, "remaining_op_num should be specified")
utils.make_sure(self.config.is_onnxruntime_backend, "only onnxruntime is supported to test transpose optimizer")
origin_model_path = self.save_onnx_model(origin_proto, onnx_feed_dict, postfix="_origin")
expected = self.run_onnxruntime(origin_model_path, onnx_feed_dict, output_names_with_port)
new_proto, new_graph = GraphUtil.optimize_model_proto(origin_proto, catch_errors=False, return_graph=True)
self.assertTrue(new_proto, msg="model proto after optimizer should not be None")
new_model_path = self.save_onnx_model(new_proto, onnx_feed_dict, postfix="_opt")
current = GraphUtil.get_node_count_from_onnx_graph(new_proto.graph)
actual = self.run_onnxruntime(new_model_path, onnx_feed_dict, output_names_with_port)
for expected_val, actual_val in zip(expected, actual):
self.assertAllClose(expected_val, actual_val, rtol=rtol, atol=1e-5)
self.assertEqual(expected_val.dtype, actual_val.dtype)
self.assertEqual(expected_val.shape, actual_val.shape)
self.assertTrue(current[op_type] == remaining_op_num,
msg="Expect " + str(remaining_op_num) + " " + op_type + " ops left, but actually " + str(
current[op_type]) + " left")
self.assert_shapes_correct(new_graph, allow_missing=False, run_checker=True)
return new_proto
@staticmethod
def _make_onnx_const(np_val, output_name):
node = helper.make_node(
'Constant',
inputs=[],
outputs=[output_name],
value=helper.make_tensor(
name=output_name,
data_type=utils.map_numpy_to_onnx_dtype(np_val.dtype),
dims=np_val.shape,
vals=np_val.flatten().astype(np_val.dtype).tolist(),
),
)
return node
def make_model(self, graph, producer_name="onnx-tests"):
imp = OperatorSetIdProto()
imp.version = self.config.opset
model_proto = helper.make_model(graph, producer_name=producer_name, opset_imports=[imp])
try:
model_proto.ir_version = constants.OPSET_TO_IR_VERSION.get(self.config.opset, model_proto.ir_version)
except: # pylint: disable=bare-except
pass
return model_proto
    # Transpose Optimizer Tests Start
def run_transpose_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
remaining_transpose_num=None, debug=False, rtol=1e-07):
return self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type="Transpose",
remaining_op_num=remaining_transpose_num, debug=debug, rtol=rtol)
def check_transpose_perm(self, model_proto, expected_perm):
for node in model_proto.graph.node:
if node.op_type == "Transpose":
perm = list(node.attribute[0].ints)
self.assertEqual(perm, expected_perm)
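    # Note on perm semantics (illustrative): perm=[0, 2, 3, 1] maps an NCHW
    # shape (2, 3, 4, 5) to NHWC (2, 4, 5, 3); perm=[0, 3, 1, 2] is its inverse,
    # which is why the transpose pairs in the tests below cancel out.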
@parameterized.expand([
((2, 3, 4, 5), [0, 3, 1, 2], [0, 2, 3, 1]),
((2, 3, 4, 5, 6), [0, 4, 1, 2, 3], [0, 2, 3, 4, 1]),
])
def test_transpose_with_concat(self, input_shape, perm, inner_perm):
input_shape_with_trans = [input_shape[i] for i in perm]
for axis in range(len(input_shape)):
output_before_trans = list(input_shape)
output_before_trans[axis] *= 2
output_shape = [output_before_trans[i] for i in perm]
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=inner_perm, name="trans")
node2 = helper.make_node("Concat", ["Y", "input_data2"], ["Z"], axis=axis, name="concat")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm, name="trans2")
graph = helper.make_graph(
[node1, node2, node3],
"test_transpose_with_concat",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape_with_trans),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, input_shape),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape_with_trans).astype(np.float32),
"input_data2": np.random.randn(*input_shape).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_add1(self, input_shape, perm_input, perm_output):
# when a transpose is followed by a broadcasting op,
# a reshape is needed when swapping the transpose with that op so the op can still broadcast its inputs
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=perm_input, name="trans")
node2 = helper.make_node("Add", ["Y", "input_data2"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node1, node2, node3],
"transpose_with_shape",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, (input_shape[1],)),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, input_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape).astype(np.float32),
"input_data2": np.random.randn(input_shape[1]).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_add2(self, input_shape1, input_shape2, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["input_data1"], ["Y"], perm=perm_input, name="trans")
node2 = helper.make_node("Add", ["Y", "input_data2"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans2")
output_shape = input_shape1
graph = helper.make_graph(
[node1, node2, node3],
"transpose_with_shape",
[helper.make_tensor_value_info("input_data1", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("input_data2", TensorProto.FLOAT, input_shape2),
],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
feed_dict = {"input_data1": np.random.randn(*input_shape1).astype(np.float32),
"input_data2": np.random.randn(*input_shape2).astype(np.float32),
}
self.run_transpose_compare(["res"], feed_dict, model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_relu(self, shape, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Relu", ["Y"], ["Z"], name="relu")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"relu-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_leaky_relu(self, shape, perm_input, perm_output):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("LeakyRelu", ["Y"], ["Z"], alpha=0.02, name="relu")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"LeakyRelu-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "QuantizeLinear")
def test_transpose_quantize(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array(0.75, dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array(3, dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("QuantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="quantize")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"quantize-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.UINT8, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [0, 2, 1], [0, 2, 1]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "QuantizeLinear with axis")
def test_transpose_quantize_with_axis(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array([0.75, 0.1, 2.3, 0.3], dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array([2, 4, 6, 8], dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("QuantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="quantize", axis=1)
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"quantize-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.UINT8, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "DequantizeLinear")
def test_transpose_dequantize(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array(0.75, dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array(3, dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("DequantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="dequantize")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"dequantize-test",
[helper.make_tensor_value_info("X", TensorProto.UINT8, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randint(0, 100, shape, np.uint8)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [0, 2, 1], [0, 2, 1]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "DequantizeLinear with axis")
def test_transpose_dequantize_with_axis(self, shape, perm_input, perm_output):
scale = numpy_helper.from_array(np.array([0.75, 0.1, 2.3, 0.3], dtype=np.float32), name='scale')
zero_point = numpy_helper.from_array(np.array([2, 4, 6, 8], dtype=np.uint8), name='zero_point')
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("DequantizeLinear", ["Y", "scale", "zero_point"], ["Z"], name="dequantize", axis=1)
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"dequantize-test",
[helper.make_tensor_value_info("X", TensorProto.UINT8, shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, shape)],
[scale, zero_point]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randint(0, 100, shape, np.uint8)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
([2, 3, 4], [1, 2, 1], [1], [0, 2, 1], [0, 2, 1]),
([2, 3, 4, 5], [1, 2, 1, 2], [1], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_max_version(9, "Slice in opset 9 and takes 'axes, 'start' and 'ends' as attributes")
def test_transpose_slice(self, input_shape, slice_size, axes, perm_input, perm_output):
axes = np.array(axes, dtype=np.int64)
starts = np.array([0] * axes.size, dtype=np.int64)
ends = []
for i in range(axes.size):
ends.append(slice_size[axes[i]])
ends = np.array(ends, dtype=np.int64)
output_shape = input_shape.copy()
for axis in axes:
output_shape[perm_input[axis]] = slice_size[axis]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Slice", ["Y"], ["Z"], starts=starts, ends=ends, axes=axes, name="slice")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"slice-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
[
helper.make_tensor("starts", TensorProto.INT64, starts.shape, starts),
helper.make_tensor("ends", TensorProto.INT64, ends.shape, ends),
helper.make_tensor("axes", TensorProto.INT64, axes.shape, axes)
]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
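# Note: pushing the Slice ahead of the Transpose requires remapping each slice
# axis through the permutation, since axis a of the transposed tensor Y is axis
# perm_input[a] of X; the output_shape computation above
# (output_shape[perm_input[axis]] = slice_size[axis]) encodes exactly that.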
@parameterized.expand([
([2, 3, 4], [1, 2, 1], [1], [0, 2, 1], [0, 2, 1]),
([2, 3, 4, 5], [1, 2, 1, 2], [1], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5], [1, 2, 1, 2], [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
([2, 3, 4, 5, 6], [1, 2, 1, 2, 1], [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(10, "Slice in opset 10 can accept dynamic 'start' and 'ends'")
def test_transpose_slice_opset_10(self, input_shape, slice_size, axes, perm_input, perm_output):
axes = np.array(axes, dtype=np.int32)
starts = np.array([0] * axes.size, dtype=np.int32)
ends = []
for i in range(axes.size):
ends.append(slice_size[axes[i]])
ends = np.array(ends, dtype=np.int32)
output_shape = input_shape.copy()
for axis in axes:
output_shape[perm_input[axis]] = slice_size[axis]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Slice", ["Y", "starts", "ends", "axes"], ["Z"], name="slice")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node1, node2, node3],
"slice-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
[
helper.make_tensor("starts", TensorProto.INT32, starts.shape, starts),
helper.make_tensor("ends", TensorProto.INT32, ends.shape, ends),
helper.make_tensor("axes", TensorProto.INT32, axes.shape, axes)
]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), (2, 0, 1), (1, 2, 0)),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(8, "Max in opset 10 supports broadcasting")
def test_transpose_max(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
const_3_val = np.random.randn(*input_shape2).astype(np.float32)
const_3 = helper.make_tensor("const_3", TensorProto.FLOAT, input_shape2, const_3_val.flatten())
const_3_node = helper.make_node("Constant", [], ["const_3"], value=const_3, name="const_3")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "const_3", "const_2", "const_1"], ["Z"], name="max")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, const_2_node, const_3_node, node1, node2, node3],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(8, "Max in opset 10 supports broadcasting")
def test_transpose_max_input_non_const(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "non_const", "const_2", "const_1"], ["Z"], name="max")
node3 = helper.make_node("Transpose", ["Z"], ["Z1"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, const_2_node, node1, node2, node3],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z1"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"non_const": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(8, "Max in opset 10 supports broadcasting")
def test_transpose_max_no_cancel(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = [2.0]
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, (1,), const_1_val)
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
const_2_val = np.random.randn(*input_shape2).astype(np.float32)
const_2 = helper.make_tensor("const_2", TensorProto.FLOAT, input_shape2, const_2_val.flatten())
const_2_node = helper.make_node("Constant", [], ["const_2"], value=const_2, name="const_2")
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Max", ["Y", "non_const", "const_2", "const_1"], ["Z"], name="max")
output_shape = [None] * len(input_shape1)
graph = helper.make_graph(
[const_1_node, const_2_node, node1, node2],
"Max-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"non_const": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=2)
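# Note: there is no trailing Transpose here to cancel against, so the test pins
# down the expected count of 2, presumably one Transpose pushed onto the
# non-const input of Max and one inserted after the Max output.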
@parameterized.expand([
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_transpose_merge(self, input_shape1, input_shape2, perm):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node1 = helper.make_node("Transpose", ["X"], ["Y_1"], perm=perm, name="trans_1")
node2 = helper.make_node("Mul", ["Y", "Y_1"], ["OUT"], name="mul")
output_shape = input_shape2
graph = helper.make_graph(
[node0, node1, node2],
"transpose-merge-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_mul_as_square(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans")
node1 = helper.make_node("Mul", ["Y", "Y"], ["Z"], name="mul")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans_1")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-mul-as-sqr-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_mul_broadcastable_const(self, shape, perm_input, perm_output):
const = numpy_helper.from_array(np.random.random((1, shape[1])).astype(np.float32), name='const')
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans")
node1 = helper.make_node("Mul", ["Y", "const"], ["Z"], name="mul")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans_1")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-mul-const-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
[const],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1]),
((2, 3, 4, 5), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
def test_transpose_with_shape(self, shape, perm):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Shape", ["Y"], ["Z"], name="shape")
graph = helper.make_graph(
[node1, node2],
"transpose_with_shape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("Z", TensorProto.INT64, [len(shape)])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), [2, 0, 1]),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_transpose_with_identity(self, input_shape, output_shape, perm):
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Identity", ["Y"], ["Z"], name="identity")
graph = helper.make_graph(
[node1, node2],
"transpose_with_identity",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
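# Note: the Identity is removed by the optimizer, but the Transpose then feeds
# the graph output directly and must be kept (compare
# test_trans_output_as_graph_outputs below), hence remaining_transpose_num=1.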
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_sqrt(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans1")
node1 = helper.make_node("Sqrt", ["Y"], ["Z"], name="sqrt")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-sqrt-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 3, 4), [4, 3], [0, 2, 1], [1, 0]),
((1, 3, 4, 5), (4, 5, 3), [0, 2, 3, 1], [1, 2, 0]),
((1, 3, 4, 5, 6), (4, 5, 6, 3), [0, 2, 3, 4, 1], [1, 2, 3, 0]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze1(self, input_shape, output_shape, perm, expected_perm):
# squeeze the first dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[0])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
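# Note on expected_perm: once the Squeeze is swapped in front of the Transpose,
# the squeezed entry is dropped from the perm and the remaining values are
# renumbered. E.g. for perm = [0, 2, 3, 1] with axis 0 squeezed: drop the value
# 0, leaving [2, 3, 1], then shift every value down past the gap -> [1, 2, 0].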
@parameterized.expand([
((1, 3, 4), (1, 4, 1, 3, 1, 1), [2, 0, 1], [0, 4, 5], [2, 3, 0, 1, 4, 5]),
((1, 3, 4, 5), (1, 1, 4, 5, 1, 3, 1), [0, 2, 3, 1], [0, 4, 6], [0, 1, 4, 5, 2, 3, 6]),
((1, 3, 4, 5, 6), (1, 1, 4, 5, 1, 6, 1, 3), [0, 2, 3, 4, 1], [0, 4, 6], [0, 1, 4, 5, 6, 7, 2, 3]),
])
def test_transpose_with_unsqueeze(self, input_shape, output_shape, perm, axes_val, expected_perm):
# unsqueeze multiple dims (always including dim 0)
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
if self.config.opset <= 12:
node2 = helper.make_node("Unsqueeze", ["Y"], ["Z"], name="unsqueeze", axes=axes_val)
nodes = [node1, node2]
else:
axes = self._make_onnx_const(np.array(axes_val, dtype=np.int64), "axes")
node2 = helper.make_node("Unsqueeze", ["Y", "axes"], ["Z"], name="unsqueeze")
nodes = [axes, node1, node2]
graph = helper.make_graph(
nodes,
"transpose_with_unsqueeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((1, 3, 4), [4, 3], [0, 2, 1], [1, 0]),
((1, 3, 4, 5), (4, 5, 3), [0, 2, 3, 1], [1, 2, 0]),
((1, 3, 4, 5, 6), (4, 5, 6, 3), [0, 2, 3, 4, 1], [1, 2, 3, 0]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze1_13(self, input_shape, output_shape, perm, expected_perm):
# squeeze the first dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([0], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 4, 1, 5), (3, 5, 4), [0, 2, 3, 1], [0, 2, 1]),
((3, 4, 1, 5, 6), (3, 5, 6, 4), [0, 2, 3, 4, 1], [0, 2, 3, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze2(self, input_shape, output_shape, perm, expected_perm):
# squeeze the second dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 4, 1, 5), (3, 5, 4), [0, 2, 3, 1], [0, 2, 1]),
((3, 4, 1, 5, 6), (3, 5, 6, 4), [0, 2, 3, 4, 1], [0, 2, 3, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze2_13(self, input_shape, output_shape, perm, expected_perm):
# squeeze the second dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
model_after_opt = self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
self.check_transpose_perm(model_after_opt, expected_perm)
@parameterized.expand([
((3, 1, 4, 5), (3, 4, 5), [0, 2, 3, 1]),
((3, 1, 4, 5, 6), (3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze3(self, input_shape, output_shape, perm):
# squeeze the last dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[len(input_shape) - 1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 4, 5), (3, 4, 5), [0, 2, 3, 1]),
((3, 1, 4, 5, 6), (3, 4, 5, 6), [0, 2, 3, 4, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze3_13(self, input_shape, output_shape, perm):
# squeeze the last dim
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([len(input_shape) - 1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 1, 5), (3, 5), [0, 2, 3, 1]),
((3, 1, 1, 5, 4), (3, 5, 4), [0, 2, 3, 4, 1]),
])
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze4(self, input_shape, output_shape, perm):
# squeeze two dims
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
node2 = helper.make_node("Squeeze", ["Y"], ["Z"], name="squeeze", axes=[1, len(input_shape) - 1])
graph = helper.make_graph(
[node1, node2],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 1, 1, 5), (3, 5), [0, 2, 3, 1]),
((3, 1, 1, 5, 4), (3, 5, 4), [0, 2, 3, 4, 1]),
])
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_transpose_with_squeeze4_13(self, input_shape, output_shape, perm):
# squeeze two dims
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
axes = self._make_onnx_const(np.array([1, len(input_shape) - 1], dtype=np.int64), "axes")
node2 = helper.make_node("Squeeze", ["Y", "axes"], ["Z"], name="squeeze")
graph = helper.make_graph(
[node1, node2, axes],
"transpose_with_squeeze",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Z", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Z"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((10, 3, 4), [0, 2, 1], [0, 2, 1]),
((10, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((10, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_with_loop(self, shape, perm_input, perm_output):
def _define_loop_graph(external_inputs):
# external_inputs: external nodes that will be used by this subgraph
# (a loop body with no loop-carried computation)
# per-iteration computation:
# for (...) { a = external_inputs[i]; b = trans(a); c = squeeze(b) }, where c is the scan output
node1 = helper.make_node("Gather", [external_inputs[0], "loop_iter_num"], ["Y0"])
node2 = helper.make_node("Transpose", ["Y0"], ["Z0"], perm=perm_input)
# graph output
if get_test_config().opset <= 12:
node3 = helper.make_node("Squeeze", ["Z0"], ["scan_output"], axes=[0])
const_node = None
else:
const_tensor = helper.make_tensor(name='const', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], dtype=np.int64))
const_node = helper.make_node("Constant", [], ["axes_const"], value=const_tensor, name="const")
node3 = helper.make_node("Squeeze", ["Z0", "axes_const"], ["scan_output"])
node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"])
node5 = helper.make_node("Identity", ["loop_condition"], ["loop_carried_output"])
nodes = [node1, node2, node3, node4, node5]
if const_node is not None:
nodes.append(const_node)
graph = helper.make_graph(
nodes,
"loop_subgraph",
[helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)), # iteration_num
helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()), # condition
helper.make_tensor_value_info("loop_carried", TensorProto.BOOL, ()) # loop_carried
],
[helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("loop_carried_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("scan_output", TensorProto.FLOAT, ["unknown"] * (len(shape) - 1))
],
)
return graph
def _make_loop(external_inputs, outputs):
trip_cnt = self._make_onnx_const(np.array(10, dtype=np.int64), "trip_cnt")
cond = self._make_onnx_const(np.array(True, dtype=np.bool_), "cond")  # np.bool_ (the np.bool alias was removed in newer NumPy)
sub_graph = _define_loop_graph(external_inputs)
loop_node = helper.make_node("Loop", ["trip_cnt", "cond", "cond"], outputs,
name="loop", body=sub_graph)
return trip_cnt, cond, loop_node
nodes = _make_loop(["array"], ["loop_carried", "scan_out"])
res = helper.make_node("Transpose", ["scan_out"], ["Y"], perm=perm_output, name="trans")
graph = helper.make_graph(
[*nodes, res],
"transpose_with_loop",
[helper.make_tensor_value_info("array", TensorProto.FLOAT, ["unknow"] * len(shape))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, ["unknow"] * len(shape))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"array": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [4, 2, 3], [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [2, 4, 5, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [2, 4, 5, 6, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_trans_with_sub(self, io_shape, const_shape_base, perm_input, perm_output):
const_shapes = []
for i in range(len(const_shape_base)):
const_shapes.append(const_shape_base[i:])
for trans_is_first_input in [True, False]:
for const_shape in const_shapes:
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_a")
const_tensor = helper.make_tensor(name='const', data_type=TensorProto.FLOAT, dims=const_shape,
vals=np.random.randn(*const_shape).flatten().astype(np.float32))
node2 = helper.make_node("Constant", [], ["const"], value=const_tensor, name="const")
if trans_is_first_input:
node3 = helper.make_node("Sub", ["Y", "const"], ["Z"], name="sub")
else:
node3 = helper.make_node("Sub", ["const", "Y"], ["Z"], name="sub")
node4 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_b")
graph = helper.make_graph(
[node1, node2, node3, node4],
"test_trans_with_sub",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, io_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, io_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*io_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4, 5), [2, 4, 5, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [2, 4, 5, 6, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_trans_with_sub_input_non_const(self, io_shape, non_const_shape_base, perm_input, perm_output):
non_const_shapes = []
for i in range(len(non_const_shape_base) - 1):
non_const_shapes.append(non_const_shape_base[i:])
for trans_is_first_input in [True, False]:
for non_const_shape in non_const_shapes:
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_a")
if trans_is_first_input:
node2 = helper.make_node("Sub", ["Y", "non_const"], ["Z"], name="sub")
else:
node2 = helper.make_node("Sub", ["non_const", "Y"], ["Z"], name="sub")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_b")
graph = helper.make_graph(
[node1, node2, node3],
"test_trans_with_sub_input_non_const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, io_shape),
helper.make_tensor_value_info("non_const", TensorProto.FLOAT, non_const_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, io_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*io_shape).astype(np.float32),
"non_const": np.random.randn(*non_const_shape).astype(np.float32)},
model_proto, remaining_transpose_num=1)
@parameterized.expand([
((1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_input_non_const(self, input_shape1, input_shape2, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Add", ["Y", "A"], ["Z"], name="add")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[node0, node1, node2],
"transpose-add-test-input-non-const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("A", TensorProto.FLOAT, input_shape2)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape1).astype(np.float32),
"A": np.random.randn(*input_shape2).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [4, 2, 3], [2, 0, 1], [1, 2, 0]),
((1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_input_const(self, input_shape1, input_shape2, perm_input, perm_output):
const_1_val = np.random.randn(*input_shape2).astype(np.float32)
const_1 = helper.make_tensor("const_1", TensorProto.FLOAT, input_shape2, const_1_val.flatten())
const_1_node = helper.make_node("Constant", [], ["const_1"], value=const_1, name="const_1")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Add", ["Y", "const_1"], ["Z"], name="add")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
output_shape = input_shape1
graph = helper.make_graph(
[const_1_node, node0, node1, node2],
"transpose-add-test-input-const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 5, 3, 3), (16, 5, 3, 3), (1, 16, 1, 1), (1, 1, 1, 16), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 5, 3, 3, 3), (16, 5, 3, 3, 3), (1, 16, 1, 1, 1), (1, 1, 1, 1, 16), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_conv_1(self, input_shape, weights_shape, output_shape,
const_shape, perm_input, perm_output):
# case where bias's dim is 1D and can be merged into Conv
const_b_val = np.random.randn(*const_shape).astype(np.float32)
const_b = helper.make_tensor("const_b", TensorProto.FLOAT, const_shape, const_b_val.flatten())
const_b_node = helper.make_node("Constant", [], ["const_b"], value=const_b, name="const_b")
node0 = helper.make_node("Conv", ["x", "W"], ["X"], name="conv", pads=[0] * 2 * (len(input_shape) - 2))
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Add", ["Y", "const_b"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[const_b_node, node0, node1, node2, node3],
"transpose-add-test-with-conv-1",
[helper.make_tensor_value_info("x", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("W", TensorProto.FLOAT, weights_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"x": np.random.randn(*input_shape).astype(np.float32),
"W": np.random.randn(*weights_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
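# Note: the const above has a single non-unit dim (the NHWC channel axis), so
# it is effectively a per-channel bias; presumably the optimizer folds it into
# the Conv as a 1-D B input, which lets both Transpose ops cancel.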
@parameterized.expand([
((1, 1, 5, 5), (1, 1, 3, 3), (1, 1, 3, 3), (1, 3, 3, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 1, 5, 5, 5), (1, 1, 3, 3, 3), (1, 1, 3, 3, 3), (1, 3, 3, 3, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_add_with_conv_2(self, input_shape, weights_shape, output_shape,
const_shape, perm_input, perm_output):
# case where bias's dim is not 1D and can't be merged into Conv
# the Add handler just removes the transposes around the Add node
const_b_val = np.random.randn(*const_shape).astype(np.float32)
const_b = helper.make_tensor("const_b", TensorProto.FLOAT, const_shape, const_b_val.flatten())
const_b_node = helper.make_node("Constant", [], ["const_b"], value=const_b, name="const_b")
node0 = helper.make_node("Conv", ["x", "W"], ["X"], name="conv", pads=[0] * 2 * (len(input_shape) - 2))
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Add", ["Y", "const_b"], ["Z"], name="add")
node3 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[const_b_node, node0, node1, node2, node3],
"transpose-add-test-with-conv-2",
[helper.make_tensor_value_info("x", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("W", TensorProto.FLOAT, weights_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"x": np.random.randn(*input_shape).astype(np.float32),
"W": np.random.randn(*weights_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_max_version(10, "pad")
def test_transpose_pad(self, input_shape, output_shape, pads, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y"], ["Z"], pads=pads, name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(11, "pad")
def test_transpose_pad11(self, input_shape, output_shape, pads, perm_input, perm_output):
pads_val = np.array(pads, dtype=np.int64)
pads_tensor = helper.make_tensor("Pads", TensorProto.INT64, [len(input_shape) * 2], pads_val)
pads_const = helper.make_node("Constant", [], ["Pads"], value=pads_tensor, name="Pads")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y", "Pads"], ["Z"], name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2, pads_const],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (8, 4, 6), [1, 3, 0, 0, 2, 0], [2, 0, 1], [1, 2, 0]),
((1, 3, 4, 5), (2, 6, 4, 8), [1, 0, 1, 3, 0, 0, 2, 0], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (2, 5, 6, 8, 10), [1, 0, 1, 3, 1, 0, 2, 2, 1, 1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(11, "pad")
def test_transpose_pad11_non_const_pads(self, input_shape, output_shape, pads, perm_input, perm_output):
pads_val = np.array(pads, dtype=np.int64)
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("Pad", ["Y", "Pads"], ["Z"], name="pad")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-pad-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape),
helper.make_tensor_value_info("Pads", TensorProto.INT64, pads_val.shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"],
{
"X": np.random.randn(*input_shape).astype(np.float32),
"Pads": pads_val
},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), [2, 0, 1], [1, 2, 0]),
((2, 3, 4, 5), [0, 2, 3, 1], [0, 3, 1, 2]),
((2, 3, 4, 5, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_reciprocal(self, shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans1")
node1 = helper.make_node("Reciprocal", ["Y"], ["Z"], name="reciprocal")
node2 = helper.make_node("Transpose", ["Z"], ["OUT"], perm=perm_output, name="trans2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reciprocal-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["OUT"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 1, 1), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_transpose_reducemean(self, input_shape, output_shape, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceMean", ["Y"], ["Z"], axes=list(range(1, len(input_shape) - 1)),
keepdims=1, name="reducemean")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducemean-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [1], [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 4, 1), [2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 3, 1, 1), [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 1, 1, 1), [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 5, 6), [1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [1, 2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 1, 1, 1, 1), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_max_version(12, "ReduceSum from opset <= 12 has axes as an attribute")
def test_transpose_reducesum(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceSum", ["Y"], ["Z"], axes=axes,
keepdims=1, name="reducesum")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducesum-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 3, 4, 5), (1, 3, 4), [2], [0, 2, 3, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3), [1, 2], [0, 2, 3, 1], [0, 1]),
((1, 3, 4, 5), (), [0, 1, 2, 3], [0, 2, 3, 1], []),
((1, 3, 4, 5, 6), (1, 3, 5, 6), [1], [0, 2, 3, 4, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3), [1, 2, 3], [0, 2, 3, 4, 1], [0, 1]),
((1, 3, 4, 5, 6), (), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], []),
])
def test_transpose_reducemax(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceMax", ["Y"], ["Z"], axes=axes,
keepdims=0, name="reducemax")
if perm_output:
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
else:
node2 = helper.make_node("Identity", ["Z"], ["res"], name="trans_2")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducemax-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
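# Note: with keepdims=0 the reduction removes axes, so the compensating perm
# shrinks accordingly (see the perm_output column above); when all axes are
# reduced the result is a scalar and no second Transpose is needed, which is
# why those cases use an Identity instead.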
def test_transpose_argmax(self):
input_shape = [1, 2, 3, 4]
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans_1")
node1 = helper.make_node("ArgMax", ["Y"], ["Z"], axis=3, keepdims=0, name="argmax")
node2 = helper.make_node("Cast", ["Z"], ["res"], to=TensorProto.INT32, name="cast")
graph = helper.make_graph(
[node0, node1, node2],
"transpose-argmax-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.INT32, [1, 3, 4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
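# Note: ArgMax over the last axis of the NHWC-transposed tensor equals ArgMax
# over the channel axis of X (perm[3] == 1), and keepdims=0 removes that axis,
# so presumably only the axis attribute needs remapping and the Transpose can
# be dropped entirely.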
def test_transpose_tile(self):
input_shape = [1, 2, 3, 4]
repeats_value = [3, 6, 5, 11]
repeats_tensor = helper.make_tensor("A", TensorProto.INT64, [len(input_shape)], repeats_value)
repeats_const = helper.make_node("Constant", [], ["A"], value=repeats_tensor, name="repeats_const")
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 3, 1], name="trans_1")
node1 = helper.make_node("Tile", ["Y", "A"], ["Z"], name="tile")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=[0, 3, 1, 2], name="trans_2")
graph = helper.make_graph(
[repeats_const, node0, node1, node2],
"transpose-tile-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, [3, 22, 18, 20])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((3, 4, 5), (3, 4, 1), [1], [0, 2, 1], [0, 2, 1]),
((1, 3, 4, 5), (1, 3, 4, 1), [2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 3, 1, 1), [1, 2], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5), (1, 1, 1, 1), [0, 1, 2, 3], [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 3, 4, 5, 6), (1, 3, 1, 5, 6), [1], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 3, 1, 1, 1), [1, 2, 3], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
((1, 3, 4, 5, 6), (1, 1, 1, 1, 1), [0, 1, 2, 3, 4], [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
@check_opset_min_version(13, "ReduceSum from opset >= 13 has axes as an input")
def test_transpose_reducesum_opset_13(self, input_shape, output_shape, axes, perm_input, perm_output):
node0 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm_input, name="trans_1")
node1 = helper.make_node("ReduceSum", ["Y", "axes"], ["Z"], keepdims=1, name="reducesum")
node2 = helper.make_node("Transpose", ["Z"], ["res"], perm=perm_output, name="trans_2")
axes = np.array(axes, dtype=np.int64)
graph = helper.make_graph(
[node0, node1, node2],
"transpose-reducesum-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
[helper.make_tensor("axes", TensorProto.INT64, axes.shape, axes)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*input_shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 3, 4), (4, 2, 3), [2, 0, 1]),
((2, 3, 4, 5), (2, 4, 5, 3), [0, 2, 3, 1]),
((2, 3, 4, 5, 6), (2, 4, 5, 6, 3), [0, 2, 3, 4, 1]),
])
def test_trans_output_as_graph_outputs(self, input_shape, output_shape, perm):
"""
If a transpose's output is a graph output, don't optimize it away.
"""
trans = helper.make_node("Transpose", ["X"], ["Y"], name="trans", perm=perm)
graph_proto = helper.make_graph(
[trans],
"trans-to-graph-output",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, output_shape)],
)
graph = GraphUtil.create_graph_from_onnx_graph(graph_proto)
# remove identity to graph output
identity_op = graph.get_node_by_output(graph.outputs[0])
graph.outputs = [identity_op.input[0]]
graph.remove_node(identity_op.name)
optimized_graph = GraphUtil.optimize_graph(graph)
self.assertTrue(optimized_graph, msg="graph after optimizer should not be None")
trans_cnt = len(group_nodes_by_type(optimized_graph)["Transpose"])
self.assertTrue(trans_cnt == 1, msg="Expect 1 Transpose ops left, but actually " + str(trans_cnt) + " left")
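# A Transpose whose perm only moves size-1 axes never reorders the underlying
# row-major data, so it is equivalent to a Reshape; the next two tests cover
# the NHWC and NCHW flavors of this. Illustrative check of the underlying fact:
#   x = np.random.randn(2, 3, 4, 1).astype(np.float32)
#   assert np.array_equal(x.transpose(0, 3, 1, 2).ravel(), x.ravel())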
@parameterized.expand([
((2, 3, 4, 1), (2, 3, 4, 1), [0, 3, 1, 2]),
((2, 1, 1, 4), (2, 1, 1, 4), [0, 3, 1, 2]),
((2, 3, 4, 1), (2, -1, -1, 1), [0, 3, 1, 2]),
((2, 3, 4, 2, 1), (2, 3, 4, 2, 1), [0, 4, 1, 2, 3]),
((2, 1, 1, 1, 4), (2, 1, 1, 1, 4), [0, 4, 1, 2, 3]),
((2, 3, 4, 2, 1), (2, -1, -1, -1, 1), [0, 4, 1, 2, 3]),
])
def test_trans_can_be_replaced_with_reshape1(self, input_shape_np, input_shape, perm):
# test trans-NHWC
result_shape = [input_shape[i] for i in perm]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
graph = helper.make_graph(
[node1],
"test_trans_can_be_replaced_with_reshape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, result_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"X": np.random.randn(*input_shape_np).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((2, 1, 3, 4), (2, 1, 3, 4), [0, 2, 3, 1]),
((2, 4, 1, 1), (2, 4, 1, 1), [0, 2, 3, 1]),
((2, 1, 3, 4), (2, 1, -1, -1), [0, 2, 3, 1]),
((2, 1, 3, 4, 2), (2, 1, 3, 4, 2), [0, 2, 3, 4, 1]),
((2, 4, 1, 1, 1), (2, 4, 1, 1, 1), [0, 2, 3, 4, 1]),
((2, 1, 3, 4, 2), (2, 1, -1, -1, -1), [0, 2, 3, 4, 1]),
])
def test_trans_can_be_replaced_with_reshape2(self, input_shape_np, input_shape, perm):
# test trans-NCHW
result_shape = [input_shape[i] for i in perm]
node1 = helper.make_node("Transpose", ["X"], ["Y"], perm=perm, name="trans")
graph = helper.make_graph(
[node1],
"test_trans_can_be_replaced_with_reshape",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, input_shape)],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, result_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["Y"], {"X": np.random.randn(*input_shape_np).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 6, 8), [2, 0, 1], [1, 2, 0]),
((1, 6, 8, 9), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 6, 8, 9, 2), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_two_transposes_switch_with_mul(self, shape, perm_input, perm_output):
const_node = self._make_onnx_const(np.array(np.random.random(6), dtype=np.float32), "const_10")
node0 = helper.make_node("Transpose", ["u1"], ["v1"], perm=perm_input, name="trans_0")
node1 = helper.make_node("Transpose", ["u2"], ["v2"], perm=perm_input, name="trans_1")
node2 = helper.make_node("Mul", ["v1", "v2"], ["x"], name="mul_1")
node3 = helper.make_node("Mul", ["x", const_node.output[0]], ["y"], name="mul_2")
node4 = helper.make_node("Transpose", ["y"], ["res"], perm=perm_output, name="trans_3")
graph = helper.make_graph(
[const_node, node0, node1, node2, node3, node4],
"test-transpose-mul",
[helper.make_tensor_value_info("u1", TensorProto.FLOAT, shape),
helper.make_tensor_value_info("u2", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"u1": np.random.randn(*shape).astype(np.float32),
"u2": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
@parameterized.expand([
((1, 6, 8), (8, 1, 6), [2, 0, 1], [1, 2, 0]),
((1, 6, 8, 9), (1, 8, 9, 6), [0, 2, 3, 1], [0, 3, 1, 2]),
((1, 6, 8, 9, 2), (1, 8, 9, 2, 6), [0, 2, 3, 4, 1], [0, 4, 1, 2, 3]),
])
def test_many_transposes_and_constant_switch_with_sum(self, input_shape1, input_shape2, perm_input, perm_output):
constnode = self._make_onnx_const(np.array(np.random.random(input_shape2), dtype=np.float32), "v4")
node0 = helper.make_node("Transpose", ["u1"], ["v1"], perm=perm_input, name="trans_0")
node1 = helper.make_node("Transpose", ["u2"], ["v2"], perm=perm_input, name="trans_1")
node11 = helper.make_node("Transpose", ["u3"], ["v3"], perm=perm_input, name="trans_2")
node2 = helper.make_node("Sum", ["v1", "v2", "v3", "v4"], ["x"], name="sum_1")
node3 = helper.make_node("Sum", ["x", "v1"], ["y"], name="sum_2")
node4 = helper.make_node("Transpose", ["y"], ["res"], perm=perm_output, name="trans_4")
output_shape = input_shape1
graph = helper.make_graph(
[constnode, node0, node1, node11, node2, node3, node4],
"test-transpose-mul",
[helper.make_tensor_value_info("u1", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("u2", TensorProto.FLOAT, input_shape1),
helper.make_tensor_value_info("u3", TensorProto.FLOAT, input_shape1)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, output_shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"u1": np.random.randn(*input_shape1).astype(np.float32),
"u2": np.random.randn(*input_shape1).astype(np.float32),
"u3": np.random.randn(*input_shape1).astype(np.float32)},
model_proto, remaining_transpose_num=0)
# Transpose Optimizer Tests End
# Identity Optimizer Tests Start
def run_identity_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
remaining_identity_num=None, debug=False, rtol=1e-07):
self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type="Identity",
remaining_op_num=remaining_identity_num, debug=debug, rtol=rtol)
def test_identity_non_graph_output(self):
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="add")
node2 = helper.make_node("Identity", ["Y"], ["Z"], name="identity")
node3 = helper.make_node("Shape", ["Z"], ["Z1"], name="shape")
graph = helper.make_graph(
[node1, node2, node3],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.INT64, [4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=0)
def test_identity_unremovable_identity(self):
# should not remove!!
node1 = helper.make_node("Identity", ["X"], ["Y"], name="identity")
graph = helper.make_graph(
[node1],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 3, 4, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Y"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=1)
def test_identity_output_as_multiple_graph_outputs(self):
# handle case like this, both Identity nodes are graph outputs,
# Add
# / \
# Identity Identity
# We at most can remove one Identity for this case.
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="identity")
node2 = helper.make_node("Identity", ["Y"], ["Z1"], name="identity2")
node3 = helper.make_node("Identity", ["Y"], ["Z2"], name="identity3")
graph = helper.make_graph(
[node1, node2, node3],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.FLOAT, (2, 3, 4, 5)),
helper.make_tensor_value_info("Z2", TensorProto.FLOAT, (2, 3, 4, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1", "Z2"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=1)
def test_identity_in_subgraph_non_graph_output(self):
node1 = helper.make_node("Add", ["X", "X"], ["Y"], name="add")
iter_num_value = np.array(1, dtype=np.int64)
node2 = helper.make_node(
'Constant',
inputs=[],
outputs=['iterate_num_value'],
value=helper.make_tensor(
name='iterate_num_value',
data_type=TensorProto.INT64,
dims=iter_num_value.shape,
vals=iter_num_value.flatten().astype(np.int64).tolist(),
),
)
cond_value = np.array(True, dtype=np.bool_)
node3 = helper.make_node(
'Constant',
inputs=[],
outputs=['cond_value'],
value=helper.make_tensor(
name='cond_value',
data_type=TensorProto.BOOL,
dims=cond_value.shape,
vals=cond_value.flatten().astype(np.bool_).tolist(),
),
)
# sub graph
sub_node1 = helper.make_node("Add", ["loop_var_1", "loop_var_1"], ["SubY"], name="sub_add")
sub_node2 = helper.make_node("Identity", ["SubY"], ["SubIdentity1"], name="sub_identity_1")
sub_node3 = helper.make_node("Identity", ["SubIdentity1"], ["loop_var_out_1"], name="sub_identity_2")
sub_node4 = helper.make_node("Identity", ["loop_condition"], ["loop_cond_output"], name="sub_identity_3")
sub_graph = helper.make_graph(
[sub_node1, sub_node2, sub_node3, sub_node4],
"identity_subgraph-test",
[helper.make_tensor_value_info("loop_iter_num", TensorProto.INT64, (1,)), # iteration_num
helper.make_tensor_value_info("loop_condition", TensorProto.BOOL, ()), # condition
helper.make_tensor_value_info("loop_var_1", TensorProto.FLOAT, ()), # loop-carried dependency
],
[helper.make_tensor_value_info("loop_cond_output", TensorProto.BOOL, ()),
helper.make_tensor_value_info("loop_var_out_1", TensorProto.FLOAT, ())
],
)
# sub graph ends
loop_node = helper.make_node("Loop", ["iterate_num_value", "cond_value", "Y"], ["loop_var_1_output"],
name="loop", body=sub_graph)
node4 = helper.make_node("Identity", ["loop_var_1_output"], ["Z"], name="identity")
node5 = helper.make_node("Shape", ["Z"], ["Z1"], name="shape")
graph = helper.make_graph(
[node1, node2, node3, loop_node, node4, node5],
"identity-test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3, 4, 5))],
[helper.make_tensor_value_info("Z1", TensorProto.INT64, [4])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_identity_compare(["Z1"], {"X": np.random.randn(2, 3, 4, 5).astype(np.float32)},
model_proto, remaining_identity_num=0)
# Identity Optimizer Tests End
# Merge Duplicated Nodes Optimizer Tests Start
def run_merge_duplicated_nodes_compare(self, output_names_with_port, onnx_feed_dict, origin_proto,
op_type=None, remaining_op_num=None, debug=False, rtol=1e-07,
graph_validator=None):
new_proto = self.run_and_compare(output_names_with_port, onnx_feed_dict, origin_proto, op_type=op_type,
remaining_op_num=remaining_op_num, debug=debug, rtol=rtol)
if graph_validator:
self.assertTrue(graph_validator(new_proto.graph))
def test_duplicated_duplicated_input(self):
# nodes with identical inputs should merge; node2's inputs differ, so it must remain
node0 = helper.make_node('Add', inputs=["X", "X"], outputs=["value0"])
node1 = helper.make_node('Add', inputs=["X", "X"], outputs=["value1"])
node2 = helper.make_node('Add', inputs=["value1", "X"], outputs=["value2"])
node3 = helper.make_node("Mul", ["value0", "value2"], ["value3"])
node4 = helper.make_node("Mul", ["value1", "value3"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4],
"test_duplicated_duplicated_input",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (5, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {"X": np.random.randn(5, 5).astype(np.float32)}, model_proto,
op_type="Add", remaining_op_num=2)
def test_duplicated_duplicated_attributes(self):
# nodes with identical attributes should merge; node2's axes differ, so it must remain
node0 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value0"], axes=[0], keepdims=0)
node1 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value1"], axes=[0], keepdims=0)
node2 = helper.make_node('ReduceMin', inputs=["X"], outputs=["value2"], axes=[1], keepdims=0)
node3 = helper.make_node('Add', inputs=["value0", "value1"], outputs=["value3"])
node4 = helper.make_node("Mul", ["value2", "value3"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4],
"test_duplicated_duplicated_attributes",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (5,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {"X": np.random.randn(5, 5).astype(np.float32)}, model_proto,
op_type="ReduceMin", remaining_op_num=2)
def _check_initializer_num(self, graph_proto, num):
return num == len(graph_proto.initializer)
def test_duplicated_duplicated_constant(self):
const_val = np.array([1, 2, 3], dtype=np.float32)
tensor_1 = helper.make_tensor("tensor_1", TensorProto.FLOAT, const_val.shape, const_val)
tensor_2 = helper.make_tensor("tensor_2", TensorProto.FLOAT, const_val.shape, const_val)
tensor_3 = helper.make_tensor("tensor_3", TensorProto.FLOAT, const_val.shape, const_val)
tensor_4 = helper.make_tensor("tensor_4", TensorProto.FLOAT, const_val.shape, const_val)
node0 = helper.make_node('Constant', inputs=[], outputs=["value0"], value=tensor_1)
node1 = helper.make_node('Constant', inputs=[], outputs=["value1"], value=tensor_2)
node2 = helper.make_node('Constant', inputs=[], outputs=["value2"], value=tensor_3)
node3 = helper.make_node('Constant', inputs=[], outputs=["value3"], value=tensor_4)
node4 = helper.make_node("Mul", ["value0", "value1"], ["output1"])
node5 = helper.make_node("Mul", ["value2", "output1"], ["output2"])
node6 = helper.make_node("Mul", ["value3", "output2"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4, node5, node6],
"test_duplicated_duplicated_constant",
[],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (3,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {}, model_proto, op_type="Constant", remaining_op_num=0,
graph_validator=lambda g: self._check_initializer_num(g, 1))
def test_duplicated_duplicated_constant_and_initializer(self):
const_val = np.array([1, 2, 3], dtype=np.float32)
tensor_1 = helper.make_tensor("value0", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)
tensor_2 = helper.make_tensor("value1", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)
tensor_3 = helper.make_tensor("value2", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)
tensor_4 = helper.make_tensor("value3", TensorProto.FLOAT, const_val.shape, const_val.tobytes(), raw=True)
node0 = helper.make_node('Constant', inputs=[], outputs=["value0"], value=tensor_1)
node1 = helper.make_node('Constant', inputs=[], outputs=["value1"], value=tensor_2)
node4 = helper.make_node("Mul", ["value0", "value1"], ["output1"])
node5 = helper.make_node("Mul", ["value2", "output1"], ["output2"])
node6 = helper.make_node("Mul", ["value3", "output2"], ["OUT"])
graph = helper.make_graph(
[node0, node1, node4, node5, node6],
"test_duplicated_duplicated_constant",
[helper.make_tensor_value_info("value2", TensorProto.FLOAT, (3,))],
[helper.make_tensor_value_info("OUT", TensorProto.FLOAT, (3,))],
[tensor_3, tensor_4]
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["OUT"], {}, model_proto, op_type="Constant", remaining_op_num=0,
graph_validator=lambda g: self._check_initializer_num(g, 2))
def test_duplicated_node_is_graph_output(self):
node0 = helper.make_node('Add', inputs=["X", "X"], outputs=["value0"])
node1 = helper.make_node('Add', inputs=["X", "X"], outputs=["value1"])
node2 = helper.make_node('Add', inputs=["value1", "X"], outputs=["value2"])
graph = helper.make_graph(
[node0, node1, node2],
"test_duplicated_node_is_graph_output",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5, 5))],
[helper.make_tensor_value_info("value1", TensorProto.FLOAT, (5, 5)),
helper.make_tensor_value_info("value2", TensorProto.FLOAT, (5, 5))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["value1", "value2"],
{"X": np.random.randn(5, 5).astype(np.float32)}, model_proto,
op_type="Add", remaining_op_num=2)
@check_opset_min_version(10, "Dropout in opset 10 produces mask of 'bool' type")
def test_duplicated_different_output_length(self):
node0 = helper.make_node('Dropout', inputs=["X"], outputs=["value0"])
node1 = helper.make_node('Dropout', inputs=["X"], outputs=["value1", "mask"])
node2 = helper.make_node('Dropout', inputs=["value1"], outputs=["value2"])
graph = helper.make_graph(
[node0, node1, node2],
"test_duplicated_different_output_length",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("value1", TensorProto.FLOAT, (5,)),
helper.make_tensor_value_info("mask", TensorProto.BOOL, (5,)),
helper.make_tensor_value_info("value2", TensorProto.FLOAT, (5,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["value1", "mask", "value2"],
{"X": np.random.randn(5).astype(np.float32)},
model_proto,
op_type="Dropout", remaining_op_num=2)
def test_duplicated_need_multiple_run(self):
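# Two identical Log chains hang off X: merging value00/value10 makes
# value01/value11 identical, which in turn merges value02/value12, so the
# optimizer needs multiple passes; one 3-node Log chain remains.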
node00 = helper.make_node('Log', inputs=["X"], outputs=["value00"])
node01 = helper.make_node('Log', inputs=["value00"], outputs=["value01"])
node02 = helper.make_node('Log', inputs=["value01"], outputs=["value02"])
node10 = helper.make_node('Log', inputs=["X"], outputs=["value10"])
node11 = helper.make_node('Log', inputs=["value10"], outputs=["value11"])
node12 = helper.make_node('Log', inputs=["value11"], outputs=["value12"])
res = helper.make_node('Add', inputs=["value02", "value12"], outputs=["res"])
graph = helper.make_graph(
[node00, node01, node02, node10, node11, node12, res],
"test_duplicated_node_is_graph_output",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (5,))],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, (5,))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_merge_duplicated_nodes_compare(["res"], {"X": np.random.randn(5).astype(np.float32)},
model_proto,
op_type="Log", remaining_op_num=3)
# Merge Duplicated Nodes Optimizer Tests End
# Reshape Optimizer Tests Start
@parameterized.expand([
(["dims12", "dim0_unsq"], 0, 1, 3), # Reshape [3, 7, 11] -> [7, 11, 3]
(["dim0_unsq", "dims12"], 2, 0, 2), # Reshape [3, 7, 11] -> [11, 3, 7]
])
def test_reshape_opt(self, concat_order, gather_i, starts, ends):
x_shape = [3, 7, 11]
node0 = helper.make_node("Shape", ["X"], ["S"])
g_indices_tensor = helper.make_tensor(name='g_indices_tensor', data_type=TensorProto.INT64, dims=[],
vals=np.array([gather_i], np.int64))
starts_tensor = helper.make_tensor(name='starts_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([starts], np.int64))
ends_tensor = helper.make_tensor(name='ends_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([ends], np.int64))
axes_tensor = helper.make_tensor(name='axes_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], np.int64))
node1 = helper.make_node("Constant", [], ["g_indices"], value=g_indices_tensor)
node2 = helper.make_node("Constant", [], ["starts"], value=starts_tensor)
node3 = helper.make_node("Constant", [], ["ends"], value=ends_tensor)
node4 = helper.make_node("Constant", [], ["axes"], value=axes_tensor)
node5 = helper.make_node("Gather", ["S", "g_indices"], ["dim0"])
if self.config.opset >= 10:
node6 = helper.make_node("Slice", ["S", "starts", "ends", "axes"], ["dims12"])
else:
node6 = helper.make_node("Slice", ["S"], ["dims12"], starts=[starts], ends=[ends], axes=[0])
if self.config.opset >= 13:
node7 = helper.make_node("Unsqueeze", ["dim0", "axes"], ["dim0_unsq"])
else:
node7 = helper.make_node("Unsqueeze", ["dim0"], ["dim0_unsq"], axes=[0])
node8 = helper.make_node("Concat", concat_order, ["dims120"], axis=0)
node9 = helper.make_node("Reshape", ["X", "dims120"], ["Y"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4, node5, node6, node7, node8, node9],
"test_reshape_opt1",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, None, None])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["Y"], {"X": np.random.randn(*x_shape).astype(np.float32)},
model_proto, op_type="Shape", remaining_op_num=0)
def test_reshape_opt_with_mul(self):
x_shape = [7, 10, 20, 30]
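# The Reshape target below is assembled as [dim0 * 5, dim1 * dim2, 6] from a
# Shape/Gather/Slice/Mul/Concat subgraph; the optimizer is expected to fold
# the whole subgraph away, leaving no Shape ops.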
node0 = helper.make_node("Shape", ["X"], ["S"])
g_indices_tensor = helper.make_tensor(name='g_indices_tensor', data_type=TensorProto.INT64, dims=[2],
vals=np.array([1, 2], np.int64))
starts_tensor = helper.make_tensor(name='starts_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], np.int64))
ends_tensor = helper.make_tensor(name='ends_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([1], np.int64))
axes_tensor = helper.make_tensor(name='axes_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([0], np.int64))
five_tensor = helper.make_tensor(name='five_tensor', data_type=TensorProto.INT32, dims=[],
vals=np.array([5], np.int32))
six_tensor = helper.make_tensor(name='six_tensor', data_type=TensorProto.INT64, dims=[1],
vals=np.array([6], np.int64))
node1 = helper.make_node("Constant", [], ["g_indices"], value=g_indices_tensor)
node2 = helper.make_node("Constant", [], ["starts"], value=starts_tensor)
node3 = helper.make_node("Constant", [], ["ends"], value=ends_tensor)
node4 = helper.make_node("Constant", [], ["axes"], value=axes_tensor)
node5 = helper.make_node("Constant", [], ["five"], value=five_tensor)
node55 = helper.make_node("Constant", [], ["six"], value=six_tensor)
node6 = helper.make_node("Gather", ["S", "g_indices"], ["dims12"])
node7 = helper.make_node("ReduceProd", ["dims12"], ["dims12_prod"], axes=[0])
if self.config.opset >= 10:
node8 = helper.make_node("Slice", ["S", "starts", "ends", ""], ["dim0"])
else:
node8 = helper.make_node("Slice", ["S"], ["dim0"], starts=[0], ends=[1])
node9 = helper.make_node("Cast", ["dim0"], ["dim0_cast"], to=TensorProto.INT32)
if self.config.opset >= 13:
node10 = helper.make_node("Squeeze", ["dim0_cast", "axes"], ["dim0_sq"])
else:
node10 = helper.make_node("Squeeze", ["dim0_cast"], ["dim0_sq"], axes=[0])
node11 = helper.make_node("Mul", ["dim0_sq", "five"], ["five_dim0"])
if self.config.opset >= 13:
node12 = helper.make_node("Unsqueeze", ["five_dim0", "axes"], ["five_dim0_unsq"])
else:
node12 = helper.make_node("Unsqueeze", ["five_dim0"], ["five_dim0_unsq"], axes=[0])
node13 = helper.make_node("Cast", ["five_dim0_unsq"], ["five_dim0_cast"], to=TensorProto.INT64)
node14 = helper.make_node("Concat", ["five_dim0_cast", "dims12_prod", "six"], ["shape"], axis=0)
node15 = helper.make_node("Reshape", ["X", "shape"], ["Y"])
graph = helper.make_graph(
[node0, node1, node2, node3, node4, node5, node55, node6, node7, node8, node9, node10,
node11, node12, node13, node14, node15],
"test_reshape_opt1",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, [None, 10, 20, 30])],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, [None, None, None])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["Y"], {"X": np.random.randn(*x_shape).astype(np.float32)},
model_proto, op_type="Shape", remaining_op_num=0)
# Reshape Optimizer Tests End
# Const Fold Optimizer Tests Start
def test_const_fold_trans_with_const1(self):
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Transpose", ["const"], ["value1"])
node3 = helper.make_node("Add", ["value1", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3],
"test_const_fold_trans_with_const1",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
def test_const_fold_trans_with_const2(self):
# needs multiple optimization runs
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Transpose", ["const"], ["value1"])
node3 = helper.make_node("Transpose", ["value1"], ["value2"])
node4 = helper.make_node("Add", ["value2", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3, node4],
"test_const_fold_trans_with_const2",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, shape)],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {"X": np.random.randn(*shape).astype(np.float32)},
model_proto, remaining_transpose_num=0)
def test_const_fold_node_is_output(self):
# needs multiple optimization runs
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Transpose", ["const"], ["value1"])
node3 = helper.make_node("Transpose", ["value1"], ["res"])
graph = helper.make_graph(
[node1, node2, node3],
"test_const_fold_node_is_output",
[],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_transpose_compare(["res"], {},
model_proto, remaining_transpose_num=0)
def test_const_fold_concat(self):
shape = (6, 4)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
const_tensor2 = helper.make_tensor(name='const_tensor2', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Constant", [], ["const2"], value=const_tensor2)
node3 = helper.make_node("Concat", ["const", "const2", "const"], ["value1"], axis=1)
node4 = helper.make_node("Add", ["value1", "inp"], ["res"])
graph = helper.make_graph(
[node1, node2, node3, node4],
"test_const_fold_trans_with_const2",
[helper.make_tensor_value_info("inp", TensorProto.FLOAT, [6, 12])],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, [6, 12])],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["res"], {"inp": np.random.randn(6, 12).astype(np.float32)}, model_proto,
"Concat", 0)
@check_opset_max_version(12, "Squeeze/Unsqueeze changed in opset 13")
def test_const_fold_unsqueeze_with_const(self):
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Unsqueeze", ["const"], ["value1"], axes=[0, 2, 3])
node3 = helper.make_node("Add", ["value1", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3],
"test_const_fold_unsqueeze_with_const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, (1, 6, 1, 1, 6))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["res"], {"X": np.random.randn(1).astype(np.float32)}, model_proto,
"Unsqueeze", 0)
@check_opset_min_version(13, "Squeeze/Unsqueeze changed in opset 13")
def test_const_fold_unsqueeze_with_const_13(self):
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
axes = self._make_onnx_const(np.array([0, 2, 3], dtype=np.int64), "axes")
node2 = helper.make_node("Unsqueeze", ["const", "axes"], ["value1"])
node3 = helper.make_node("Add", ["value1", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3, axes],
"test_const_fold_unsqueeze_with_const",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (1,))],
[helper.make_tensor_value_info("res", TensorProto.FLOAT, (1, 6, 1, 1, 6))],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["res"], {"X": np.random.randn(1).astype(np.float32)}, model_proto,
"Unsqueeze", 0)
def test_const_fold_cast_with_const(self):
shape = (6, 6)
const_tensor = helper.make_tensor(name='const_tensor', data_type=TensorProto.FLOAT, dims=shape,
vals=np.random.randn(*shape).flatten().astype(np.float32))
node1 = helper.make_node("Constant", [], ["const"], value=const_tensor)
node2 = helper.make_node("Cast", ["const"], ["value1"], to=TensorProto.INT64)
node3 = helper.make_node("Add", ["value1", "X"], ["res"])
graph = helper.make_graph(
[node1, node2, node3],
"test_const_fold_cast_with_const",
[helper.make_tensor_value_info("X", TensorProto.INT64, shape)],
[helper.make_tensor_value_info("res", TensorProto.INT64, shape)],
)
model_proto = self.make_model(graph, producer_name="onnx-tests")
self.run_and_compare(["res"], {"X": | np.random.randn(*shape) | numpy.random.randn |
"""
This file pre-processes voxelization and aggregation weights in order to save training time.
It re-projects simplified point clouds onto multiple depth planes; 32 planes are used.
"""
from __future__ import division
import numpy as np
import os, cv2, time, math, scipy
import scipy.io as io
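# A minimal sketch (illustrative, not part of the original pipeline) of the
# pinhole projection used in Voxelization below: homogeneous world points are
# mapped through K @ inv(E), and pixel coordinates are x/z and y/z.
def project_points_demo(intrinsic_matrix, extrinsic_matrix, point_clouds):
    transform = intrinsic_matrix.dot(np.linalg.inv(extrinsic_matrix))
    proj = transform.dot(point_clouds)   # 4 x N homogeneous image-space points
    depth = proj[2, :]
    u = proj[0, :] / (depth + 1e-10)     # horizontal pixel coordinate
    v = proj[1, :] / (depth + 1e-10)     # vertical pixel coordinate
    return u, v, depth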
def loadfile(ply_path):
st = time.time()
position = []
color = []
file = open(ply_path)
begin = False
while 1:
line = file.readline().strip('\n')
if not line:
break
line = line.split(' ')
if begin:
position.append(np.array([float(line[0]), float(line[1]), float(line[2]), float(1.0)]))
color.append(np.array([float(line[5]), float(line[4]), float(line[3])])) # rgb to bgr
if line[0] == 'end_header':
begin = True
file.close()
print('load ply time: %s' %(time.time() - st))
return np.transpose(position), np.transpose(color)
def makedataset(dir2):
image_names = []
depth_names = []
intrinsics = []
extrinsics = []
assert os.path.isdir(dir2)
parameter_file = []
for root, _, fnames in os.walk(dir2):
parameter_file.append(os.path.join(dir2, fnames[0]))
file = open(parameter_file[0])
while True:
line = file.readline()
if not line:
break
temp = line.split()
if len(temp) == 0:
continue
if temp[0] == 'intrinsics_matrix':
intrinsic_temp = line
if temp[0] == 'scan':
extrinsics.append(line)
intrinsics.append(intrinsic_temp)
image_names.append(temp[2])
depth_names.append(temp[1])
positions_world = np.zeros([len(extrinsics), 3])
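# temp[6], temp[10] and temp[14] pick the x/y/z translation entries of the
# row-major 4x4 extrinsic matrix that follows 'scan <depth> <image>'.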
for i in range(len(extrinsics)):
temp = extrinsics[i].split()
positions_world[i, 0] = np.float32(temp[6])
positions_world[i, 1] = np.float32(temp[10])
positions_world[i, 2] = np.float32(temp[14])
return image_names, depth_names, intrinsics, extrinsics, positions_world
def camera_parameter_read(intrinsic, extrinsic):
# tmp = intrinsics_all[i].split()
tmp = intrinsic.split()
fx = float(tmp[1])
ux = float(tmp[3])
fy = float(tmp[5])
uy = float(tmp[6])
intrinsic_matrix = np.array([[fx, 0, ux, 0], [0, fy, 1024 - uy, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
tmp = extrinsic.split()
tmp = list(map(float, tmp[3:]))
extrinsic_matrix = np.reshape(np.array(tmp), [4, 4])
extrinsic_matrix[:, [1, 2]] = extrinsic_matrix[:, [1, 2]] * (-1.0) # Camera coordinate system transform.
return intrinsic_matrix, extrinsic_matrix
def Voxelization(w, h, intrinsic_matrix, extrinsic_matrix, point_clouds, valid_depth_near, valid_depth_far, num_planes):
st = time.time()
transform_matrix = intrinsic_matrix.dot(np.linalg.inv(extrinsic_matrix))
position_image = transform_matrix.dot(point_clouds)
print('reproject_time: %s' %(time.time() - st))
depth_all = position_image[2, :]
u_all = position_image[0, :] / (depth_all + 1e-10)
v_all = position_image[1, :] / (depth_all + 1e-10)
valid_u = np.where((u_all >= 0) & (u_all <= (w-1)))
valid_v = np.where((v_all >= 0) & (v_all <= (h-1)))
valid_d = np.where((depth_all > valid_depth_near) & (depth_all < valid_depth_far))
valid_position = np.intersect1d(valid_u, valid_v)
valid_position = np.intersect1d(valid_position, valid_d)
selected_depth = depth_all[valid_position]
index = np.argsort(-selected_depth) # depth large to small
index = index[100:-50] # to reduce the influence of outliers during voxelization, we drop the 100 furthest and 50 nearest points.
valid_position_sorted = valid_position[index]
valid_d_sorted = depth_all[valid_position_sorted]
center_u_sorted = u_all[valid_position_sorted]
center_v_sorted = v_all[valid_position_sorted]
u_sorted = np.uint32(np.rint(center_u_sorted))
v_sorted = np.uint32(np.rint(center_v_sorted))
# calculate the distance to the grid center (parallel distance).
st = time.time()
distance_sorted = np.sqrt(np.square(u_sorted - center_u_sorted) + np.square(v_sorted - center_v_sorted))
print("calculate_distance: %s" % (time.time() - st))
# 3D space voxelization
num_valids = len(index)
valid_d_min = valid_d_sorted[num_valids - 1] # near depth plane
valid_d_max = valid_d_sorted[0] # far depth plane
tmp = np.linspace(valid_d_max, valid_d_min, num_planes+1)
up_boundary = tmp[1:]
d_position = np.zeros([num_valids]) # points belong to which plane.
st = time.time()
cnt = 0
for i in range(num_valids):
tmp_d = valid_d_sorted[i]
if tmp_d >= up_boundary[cnt]:
d_position[i] = num_planes - cnt - 1
else:
for j in range(1, num_planes - cnt):
cnt = cnt + 1
if tmp_d >= up_boundary[cnt]:
d_position[i] = num_planes - cnt - 1
break
print('split_time: %s' % (time.time() - st))
# grouping
groups_original = u_sorted + v_sorted*w + d_position*w*h # groups
groups_original_sort_index = np.argsort(groups_original) # small to large
groups_original_sorted = groups_original[groups_original_sort_index]
u_sorted_1 = u_sorted[groups_original_sort_index]
v_sorted_1 = v_sorted[groups_original_sort_index]
d_position_sorted_1 = d_position[groups_original_sort_index]
valid_position_sorted_1 = valid_position_sorted[groups_original_sort_index]
distance_sorted_1 = distance_sorted[groups_original_sort_index]
array = np.uint16(np.linspace(0, 1000, 1000, endpoint=False)) # assigns each point a sequence index within its voxel/group, starting from 0; assumes fewer than 1000 points per group.
groups_index = np.zeros_like(valid_position_sorted_1) # each group's start position.
groups_each = np.zeros_like(valid_position_sorted_1) # each point belongs to which group or voxel.
groups_each_index = np.zeros_like(valid_position_sorted_1, dtype=np.uint16) # each point's index/order in one group, a sequence.
group_begin = 0
cnt = 0
for ii in range(num_valids):
group_tmp = groups_original_sorted[ii]
if (ii + 1) < num_valids:
group_next = groups_original_sorted[ii+1]
if not group_tmp == group_next:
groups_each[group_begin:(ii+1)] = cnt
groups_each_index[group_begin:(ii+1)] = array[0:(ii+1 - group_begin)]
groups_index[cnt] = group_begin
cnt = cnt + 1
group_begin = ii + 1
else:
groups_each[group_begin:] = cnt
groups_each_index[group_begin:] = array[0:(num_valids-group_begin)]
groups_index[cnt] = group_begin
groups_index = groups_index[0:(cnt+1)]
print('group_time: %s' % (time.time() - st))
# calculate max num of points in one group/voxel in each plane.
split_each_max = np.zeros(num_planes, dtype=np.uint16)
split_position = np.where((d_position_sorted_1[groups_index] - np.concatenate((np.array([0]), d_position_sorted_1[groups_index][0:-1]))) > 0) # find split position of different planes.
split_each_begin = np.concatenate((np.array([0]), groups_index[split_position])) # split position based on all points, and reserve the begin position. Begin from 0.
split_each_begin_in_group = np.concatenate((np.array([0]), split_position[0])) # split position based on all groups, and reserve the begin position. Begin from 0.
d_valid = d_position_sorted_1[groups_index[split_each_begin_in_group]]
for j in range(len(split_each_begin)):
begin = split_each_begin[j]
if j < (len(split_each_begin_in_group) - 1):
end = split_each_begin[j + 1]
max_num = np.max(groups_each_index[begin:end]) + 1
split_each_max[int(d_valid[j])] = max_num
else:
max_num = np.max(groups_each_index[begin:]) + 1
split_each_max[int(d_valid[j])] = max_num
# Be careful with the data types below to avoid out-of-range overflow.
return np.uint16(u_sorted_1), np.uint16(v_sorted_1), np.uint8(d_position_sorted_1), np.uint32(valid_position_sorted_1), \
np.uint32(groups_each), np.uint32(groups_index), np.uint16(groups_each_index), \
np.uint32(split_each_begin), np.uint32(split_each_begin_in_group), np.uint16(split_each_max), \
np.float16(distance_sorted_1)
def Aggregation(npzfile, intrinsic_matrix, extrinsic_matrix, point_clouds, a, b):
select_index = npzfile['select_index'] # select_index begin with 0.
index_in_each_group = npzfile['index_in_each_group']
distance = npzfile['distance']
st = time.time()
transform_matrix = intrinsic_matrix.dot(np.linalg.inv(extrinsic_matrix))
position_image = transform_matrix.dot(point_clouds)
depth_all = position_image[2, :]
depth_selected = depth_all[select_index] * 100 # x 100, m to cm.
# parallel distance to the grid center (loaded from the npz file above)
# distance to depth_min, vertical distance
distance_1 = np.zeros(distance.shape)
each_group_begin = np.where(index_in_each_group == 0)[0]
num_valids = len(select_index)
num_groups = len(each_group_begin)
for i in range(num_groups):
begin = each_group_begin[i]
if (i+1) < num_groups:
end = each_group_begin[i+1]
distance_1[begin:end] = np.min(depth_selected[begin:end])
else:
end = num_valids
distance_1[begin:end] = np.min(depth_selected[begin:end])
distance_1 = depth_selected - distance_1
# print(np.max(distance_1))
# print(np.min(distance_1))
# calculate_weight
weight_1 = (1-distance)**a
weight_2 = 1/(1+distance_1)**b
weight_renew = weight_1*weight_2
weight_average = np.float16(weight_renew)
return weight_average
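# Illustrative check (not from the original file) of the weighting above:
# points closer to the pixel-grid center and to the group's minimum depth
# receive larger aggregation weights. The default a/b values are assumptions.
def _demo_aggregation_weights(a=2.0, b=1.0):
    d_parallel = np.array([0.0, 0.3, 0.7])    # distance to grid center (pixels)
    d_vertical = np.array([0.0, 2.0, 10.0])   # depth offset from group minimum (cm)
    weight = (1 - d_parallel) ** a / (1 + d_vertical) ** b
    return np.float16(weight)                 # approx. [1.0, 0.163, 0.0082]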
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
x = np.genfromtxt('./logistic_x.txt')
m, n = x.shape
# add bias columns to x
x = np.hstack([np.ones((m, 1)), x])
import abc
import typing
import numpy as np
import torch
ENTRY_NOT_EXTRACTED_ERR_MSG = 'Transform can not be applied because entry "{}" was not extracted'
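# check_and_return is used by the transforms below but is not defined in this
# excerpt; a minimal stand-in with the expected semantics:
def check_and_return(obj, type_):
    if not isinstance(obj, type_):
        raise ValueError("entry must be of type '{}'".format(type_.__name__))
    return obj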
# follows the principle of torchvision transform
class Transform(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __call__(self, sample: dict) -> dict:
pass
class ComposeTransform(Transform):
def __init__(self, transforms: typing.Iterable[Transform]) -> None:
self.transforms = transforms
def __call__(self, sample: dict) -> dict:
for t in self.transforms:
sample = t(sample)
return sample
class IntensityRescale(Transform):
def __init__(self, lower, upper, loop_axis=None, entries=('images',)) -> None:
super().__init__()
self.lower = lower
self.upper = upper
self.loop_axis = loop_axis
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
np_entry = check_and_return(sample[entry], np.ndarray)
if self.loop_axis is None:
np_entry = self._normalize(np_entry, self.lower, self.upper)
else:
slicing = [slice(None) for _ in range(np_entry.ndim)]
for i in range(np_entry.shape[self.loop_axis]):
slicing[self.loop_axis] = i
np_entry[tuple(slicing)] = self._normalize(np_entry[tuple(slicing)], self.lower, self.upper)
sample[entry] = np_entry
return sample
@staticmethod
def _normalize(arr: np.ndarray, lower, upper):
dtype = arr.dtype
min_, max_ = arr.min(), arr.max()
if min_ == max_:
raise ValueError('cannot normalize when min == max')
arr = (arr - min_) / (max_ - min_) * (upper - lower) + lower
return arr.astype(dtype)
class IntensityNormalization(Transform):
def __init__(self, loop_axis=None, entries=('images',)) -> None:
super().__init__()
self.loop_axis = loop_axis
self.entries = entries
self.normalize_fn = self._normalize
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
np_entry = check_and_return(sample[entry], np.ndarray)
if not np.issubdtype(np_entry.dtype, np.floating):
raise ValueError('Array must be floating type')
if self.loop_axis is None:
np_entry = self.normalize_fn(np_entry)
else:
slicing = [slice(None) for _ in range(np_entry.ndim)]
for i in range(np_entry.shape[self.loop_axis]):
slicing[self.loop_axis] = i
np_entry[tuple(slicing)] = self.normalize_fn(np_entry[tuple(slicing)])
sample[entry] = np_entry
return sample
@staticmethod
def _normalize(arr: np.ndarray):
return (arr - arr.mean()) / arr.std()
class LambdaTransform(Transform):
def __init__(self, lambda_fn, loop_axis=None, entries=('images',)) -> None:
super().__init__()
self.lambda_fn = lambda_fn
self.loop_axis = loop_axis
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
if self.loop_axis is None:
np_entry = self.lambda_fn(sample[entry])
else:
np_entry = check_and_return(sample[entry], np.ndarray)
slicing = [slice(None) for _ in range(np_entry.ndim)]
for i in range(np_entry.shape[self.loop_axis]):
slicing[self.loop_axis] = i
np_entry[tuple(slicing)] = self.lambda_fn(np_entry[tuple(slicing)])
sample[entry] = np_entry
return sample
class ClipPercentile(Transform):
def __init__(self, upper_percentile: float, lower_percentile: float=None,
loop_axis=None, entries=('images',)) -> None:
super().__init__()
self.upper_percentile = upper_percentile
if lower_percentile is None:
lower_percentile = 100 - upper_percentile
self.lower_percentile = lower_percentile
self.loop_axis = loop_axis
self.entries = entries
def __call__(self, sample: dict) -> dict:
for entry in self.entries:
if entry not in sample:
raise ValueError(ENTRY_NOT_EXTRACTED_ERR_MSG.format(entry))
np_entry = check_and_return(sample[entry], np.ndarray)
if self.loop_axis is None:
np_entry = self._clip(np_entry)
else:
slicing = [slice(None) for _ in range(np_entry.ndim)]
for i in range(np_entry.shape[self.loop_axis]):
slicing[self.loop_axis] = i
np_entry[tuple(slicing)] = self._clip(np_entry[tuple(slicing)])
sample[entry] = np_entry
return sample
def _clip(self, arr: np.ndarray):
upper_max = np.percentile(arr, self.upper_percentile)
lower_min = np.percentile(arr, self.lower_percentile)
return np.clip(arr, lower_min, upper_max)
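# Minimal usage sketch (illustrative, not from the original file): compose the
# transforms above, torchvision-style, and apply them to a sample dict.
if __name__ == '__main__':
    sample = {'images': np.random.rand(4, 8, 8).astype(np.float32)}
    pipeline = ComposeTransform([
        ClipPercentile(upper_percentile=99.0),
        IntensityNormalization(loop_axis=0),
        IntensityRescale(0.0, 1.0, loop_axis=0),
    ])
    sample = pipeline(sample)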
import h5py
import pickle
import numpy as np
def load_weights():
fff = h5py.File('Mybase/mask_rcnn_coco.h5', 'r')  # open the h5 file
# print(list(fff.keys()))
mydict = {}
mydict['global_step:0'] = 1000
########res1########
dset = fff['conv1']
a = dset['conv1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn_conv1']
a = dset['bn_conv1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
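# h folds the conv bias (c) and the BN running statistics (f, g) into an
# adjusted beta, so the converted graph only needs per-channel gamma and beta.
# (Textbook BN folding would divide by sqrt(moving_variance + eps).)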
h = ((c - f) / g) * e + d
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
########res2########
dset = fff['res2a_branch1']
a = dset['res2a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch1']
a = dset['bn2a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2a']
a = dset['res2a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2a']
a = dset['bn2a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2b']
a = dset['res2a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2b']
a = dset['bn2a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2a_branch2c']
a = dset['res2a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2a_branch2c']
a = dset['bn2a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res2b_branch2a']
a = dset['res2b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2a']
a = dset['bn2b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2b']
a = dset['res2b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2b']
a = dset['bn2b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2b_branch2c']
a = dset['res2b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2b_branch2c']
a = dset['bn2b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res2c_branch2a']
a = dset['res2c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2a']
a = dset['bn2c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2b']
a = dset['res2c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2b']
a = dset['bn2c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res2c_branch2c']
a = dset['res2c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn2c_branch2c']
a = dset['bn2c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_0/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res3########
dset = fff['res3a_branch1']
a = dset['res3a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch1']
a = dset['bn3a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2a']
a = dset['res3a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2a']
a = dset['bn3a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2b']
a = dset['res3a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2b']
a = dset['bn3a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3a_branch2c']
a = dset['res3a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3a_branch2c']
a = dset['bn3a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res3b_branch2a']
a = dset['res3b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2a']
a = dset['bn3b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2b']
a = dset['res3b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2b']
a = dset['bn3b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3b_branch2c']
a = dset['res3b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3b_branch2c']
a = dset['bn3b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3c_branch2a']
a = dset['res3c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2a']
a = dset['bn3c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2b']
a = dset['res3c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2b']
a = dset['bn3c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3c_branch2c']
a = dset['res3c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3c_branch2c']
a = dset['bn3c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res3d_branch2a']
a = dset['res3d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2a']
a = dset['bn3d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2b']
a = dset['res3d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2b']
a = dset['bn3d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res3d_branch2c']
a = dset['res3d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn3d_branch2c']
a = dset['bn3d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_1/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
########res4########
dset = fff['res4a_branch1']
a = dset['res4a_branch1']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch1']
a = dset['bn4a_branch1']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2a']
a = dset['res4a_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2a']
a = dset['bn4a_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2b']
a = dset['res4a_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2b']
a = dset['bn4a_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4a_branch2c']
a = dset['res4a_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4a_branch2c']
a = dset['bn4a_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_0/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
################################
dset = fff['res4b_branch2a']
a = dset['res4b_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2a']
a = dset['bn4b_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2b']
a = dset['res4b_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2b']
a = dset['bn4b_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4b_branch2c']
a = dset['res4b_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4b_branch2c']
a = dset['bn4b_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_1/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4c_branch2a']
a = dset['res4c_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2a']
a = dset['bn4c_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2b']
a = dset['res4c_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2b']
a = dset['bn4c_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4c_branch2c']
a = dset['res4c_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4c_branch2c']
a = dset['bn4c_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_2/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4d_branch2a']
a = dset['res4d_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2a']
a = dset['bn4d_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2b']
a = dset['res4d_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2b']
a = dset['bn4d_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4d_branch2c']
a = dset['res4d_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4d_branch2c']
a = dset['bn4d_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_3/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4e_branch2a']
a = dset['res4e_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2a']
a = dset['bn4e_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2b']
a = dset['res4e_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2b']
a = dset['bn4e_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4e_branch2c']
a = dset['res4e_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4e_branch2c']
a = dset['bn4e_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_4/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4f_branch2a']
a = dset['res4f_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2a']
a = dset['bn4f_branch2a']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_0/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2b']
a = dset['res4f_branch2b']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2b']
a = dset['bn4f_branch2b']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn_relu1_1/conv_bn1_0/batchnorm1_0/BatchNorm/beta:0' ] = h
#########
dset = fff['res4f_branch2c']
a = dset['res4f_branch2c']
b = np.array(a['kernel:0'], dtype=np.float32)
c = np.array(a['bias:0' ], dtype=np.float32)
dset = fff['bn4f_branch2c']
a = dset['bn4f_branch2c']
d = np.array(a['beta:0' ], dtype=np.float32)
e = np.array(a['gamma:0'], dtype=np.float32)
f = np.array(a['moving_mean:0' ], dtype=np.float32)
g = np.array(a['moving_variance:0'], dtype=np.float32)
h = ((c - f) / g) * e + d
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/conv1_0/weights:0'] = b
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/gamma:0'] = e
mydict['layers_module1_1/resnet_block2_0_2/resnet_unit2_5/conv_bn1_1/batchnorm1_0/BatchNorm/beta:0' ] = h
############################
dset = fff['res4g_branch2a']
a = dset['res4g_branch2a']
b = np.array(a['kernel:0'], dtype=np.float32)
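# Hedged refactoring sketch (not part of the original script): every conv+BN
# pair above repeats the same read-fold-assign steps, so the pattern could be
# factored into a helper. `fff` is assumed to be an open h5py.File of Keras
# weights, as in the code above; the helper name is illustrative.
def convert_conv_bn(fff, mydict, conv_name, bn_name, tf_prefix):
    conv = fff[conv_name][conv_name]
    kernel = np.array(conv['kernel:0'], dtype=np.float32)
    bias = np.array(conv['bias:0'], dtype=np.float32)
    bn = fff[bn_name][bn_name]
    beta = np.array(bn['beta:0'], dtype=np.float32)
    gamma = np.array(bn['gamma:0'], dtype=np.float32)
    mean = np.array(bn['moving_mean:0'], dtype=np.float32)
    var = np.array(bn['moving_variance:0'], dtype=np.float32)
    folded_beta = ((bias - mean) / var) * gamma + beta  # same fold as above
    mydict[tf_prefix + '/conv1_0/weights:0'] = kernel
    mydict[tf_prefix + '/batchnorm1_0/BatchNorm/gamma:0'] = gamma
    mydict[tf_prefix + '/batchnorm1_0/BatchNorm/beta:0'] = folded_beta
# e.g. for the res4g unit whose conversion is cut off above:
# convert_conv_bn(fff, mydict, 'res4g_branch2a', 'bn4g_branch2a',
#     'layers_module1_1/resnet_block2_0_2/resnet_unit2_6/conv_bn_relu1_0/conv_bn1_0')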
from models import CNN2
from core.Optimizers import sgd, bgd
from core.Functions import one_hot_f
import numpy as np
from tensorflow import keras
from core.Dataloader import batch_iterator
def test(model, test_inputs, test_labels):
    """Return the top-1 accuracy of `model` on the given test set."""
    num_of_sample = test_inputs.shape[0]
    cnt_correct, cnt_tot = 0, 0
    for i in range(num_of_sample):
        test_input = test_inputs[i:i + 1]  # slice keeps the batch dimension
        test_label = test_labels[i]
        res = model.forward_prop(test_input)
        # a prediction counts as correct when the argmax matches the label
        if np.argmax(res) == np.argmax(test_label):
            cnt_correct += 1
        cnt_tot += 1  # count every sample, not only the correct ones
    return cnt_correct / cnt_tot
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = np.expand_dims(train_images / 255, axis=-1)
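# Illustrative stand-in (an assumption, not the project's actual helper): the
# imported one_hot_f is not defined in this file, but a minimal NumPy one-hot
# encoder for the 10 Fashion-MNIST classes would look like this.
def _one_hot(labels, num_classes=10):
    out = np.zeros((labels.shape[0], num_classes), dtype=np.float32)
    out[np.arange(labels.shape[0]), labels] = 1.0
    return out
# e.g. train_labels_oh = _one_hot(train_labels)  # shape (60000, 10)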
"""
volreader.py
Author: <NAME>
Utilities for reading 3D volumetric data as a 3D OpenGL texture.
"""
import os
import numpy as np
from PIL import Image
import OpenGL
from OpenGL.GL import *
from scipy import misc
def loadVolume(dirName):
"""read volume from directory as a 3D texture"""
# list images in directory
files = sorted(os.listdir(dirName))
    print('loading images from: %s' % dirName)
imgDataList = []
count = 0
width, height = 0, 0
for file in files:
file_path = os.path.abspath(os.path.join(dirName, file))
try:
# read image
img = Image.open(file_path)
imgData = np.array(img.getdata(), np.uint8)
# check if all are of the same size
            if count == 0:
width, height = img.size[0], img.size[1]
imgDataList.append(imgData)
else:
if (width, height) == (img.size[0], img.size[1]):
imgDataList.append(imgData)
                else:
                    print('image size mismatch: %s' % file_path)
                    raise RuntimeError("image size mismatch")
count += 1
#print img.size
        except (IOError, OSError):
            # skip unreadable files; the size-mismatch error above still propagates
print('Invalid image: %s' % file_path)
# load image data into single array
depth = count
    data = np.concatenate(imgDataList)
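    # Hedged sketch of the typical continuation (an assumption; the original
    # file is truncated here): upload the stacked slices as a single-channel
    # GL_TEXTURE_3D, assuming grayscale slices, and return it with the
    # volume dimensions.
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
    texture = glGenTextures(1)
    glBindTexture(GL_TEXTURE_3D, texture)
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE)
    glTexImage3D(GL_TEXTURE_3D, 0, GL_RED, width, height, depth, 0,
                 GL_RED, GL_UNSIGNED_BYTE, data)
    return (texture, width, height, depth)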