import copy
import numpy as np
from lib.skeleton.skeleton import Human36mSkeleton
from lib.dataset.mocap_dataset import MocapDataset
from lib.camera.camera import CameraInfoPacket
mpii_3dhp_cameras_intrinsic_params = [
{
'id': '0',
'center': [1024.704, 1051.394],
'focal_length': [1497.693, 1497.103],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '1',
'center': [1030.519, 1052.626],
'focal_length': [1495.217, 1495.52],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '2',
'center': [983.8873, 987.5902],
'focal_length': [1495.587, 1497.828],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '3',
'center': [1029.06, 1041.409],
'focal_length': [1495.886, 1496.033],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '4',
'center': [987.6075, 1019.069],
'focal_length': [1490.952, 1491.108],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '5',
'center': [1012.331, 998.5009],
'focal_length': [1500.414, 1499.971],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '6',
'center': [999.7319, 1010.251],
'focal_length': [1498.471, 1498.8],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '7',
'center': [987.2716, 976.8773],
'focal_length': [1498.831, 1499.674],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '8',
'center': [1017.387, 1043.032],
'focal_length': [1500.172, 1500.837],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '9',
'center': [1010.423, 1037.096],
'focal_length': [1501.554, 1501.9],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '10',
'center': [1041.614, 997.0433],
'focal_length': [1498.423, 1498.585],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '11',
'center': [1009.802, 999.9984],
'focal_length': [1495.779, 1493.703],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '12',
'center': [1000.56, 1014.975],
'focal_length': [1501.326, 1501.491],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
},
{
'id': '13',
'center': [1005.702, 1004.214],
'focal_length': [1496.961, 1497.378],
'res_w': 2048,
'res_h': 2048,
'azimuth': 70,
}
]
mpii_3dhp_cameras_extrinsic_params = [
{
'translation': [-0.5628666, 1.3981379999999999, 3.852623],
'R':
[
[0.9650164, -0.262144, 0.00488022],
[-0.004488356, -0.0351275, -0.9993728],
[0.262151, 0.9643893, -0.03507521]
]
},
{
'translation': [-1.429856, 0.7381779, 4.897966],
'R':
[
[0.6050639, -0.7958773, -0.02184232],
[-0.22647, -0.1457429, -0.9630526],
[0.7632883, 0.587655, -0.2684261]
]
},
{
'translation': [0.05725702, 1.307287, 2.7998220000000003],
'R':
[
[-0.3608179, -0.932588, -0.009492658],
[-0.0585942, 0.03282591, -0.9977421],
[0.9307939, -0.359447, -0.06648842]
]
},
{
'translation': [-0.2848168, 0.8079184, 3.1771599999999998],
'R':
[
[-0.0721105, -0.9962325, -0.04817664],
[-0.4393254, 0.07508985, -0.8951841],
[0.895429, -0.04338695, -0.443085]
]
},
{
'translation': [-1.563911, 0.8019607999999999, 3.5173159999999997],
'R':
[
[0.3737275, -0.9224646, 0.09688602],
[-0.009716132, -0.1083427, -0.9940662],
[0.9274878, 0.3705685, -0.04945343]
]
},
{
'translation': [0.35841340000000005, 0.9945657999999999, 3.439832],
'R':
[
[-0.3521056, 0.9358659, 0.01328985],
[-0.04961938, -0.004485628, -0.9987582],
[-0.9346441, -0.3523278, 0.0480165]
]
},
{
'translation': [0.5694388, 0.528871, 3.6873690000000003],
'R':
[
[-0.9150326, -0.4004618, -0.04843184],
[-0.1804886, 0.5138369, -0.8386868],
[0.3607481, -0.7586845, -0.5424563]
]
},
{
'translation': [1.378866, 1.270781, 2.631567],
'R':
[
[-0.9995936, -0.001368653, 0.02847456],
[-0.02843213, -0.0246889, -0.9992908],
[0.002070688, -0.9996943, 0.02463995]
]
},
{
'translation': [0.2213543, 0.65987, 3.644688],
'R':
[
[0.000575281, 0.9981001, 0.06160985],
[0.2082146, 0.06013997, -0.9762325],
[-0.978083, 0.01338968, -0.2077844]
]
},
{
'translation': [0.38862169999999996, 0.1375452, 4.216635],
'R':
[
[0.04176839, 0.9990969, 0.00780962],
[0.5555364, -0.01672664, -0.831324],
[-0.8304425, 0.03906159, -0.5557333]
]
},
{
'translation': [1.167962, 0.6176362000000001, 4.472351],
'R':
[
[-0.8970265, 0.4204822, 0.1361548],
[0.09417118, 0.4828178, -0.8706428],
[-0.4318278, -0.7681679, -0.4726976]
]
},
{
'translation': [0.1348272, 0.2515094, 4.570244],
'R':
[
[0.9170455, 0.3465695, 0.1972746],
[0.1720879, -0.7901813, 0.5882171],
[0.3597408, -0.5054733, -0.7842726]
]
},
{
'translation': [0.4124695, 0.5327588, 4.887095],
'R':
[
[-0.7926738, -0.5951031, 0.1323657],
[-0.396246, 0.66792, 0.6299778],
[-0.4633114, 0.4469175, -0.7652499]
]
},
{
'translation': [0.8671278, 0.8274571999999999, 3.985159],
'R':
[
[-0.8701088, 0.4835728, -0.09522671],
[0.4120245, 0.8197188, 0.3978655],
[0.270456, 0.3069505, -0.9124883]
]
}
]
subjects = [
'S1_Seq1_0', 'S1_Seq1_1', 'S1_Seq1_2', 'S1_Seq1_3', 'S1_Seq1_4', 'S1_Seq1_5', 'S1_Seq1_6', 'S1_Seq1_7',
'S1_Seq1_8', 'S1_Seq1_9', 'S1_Seq1_10', 'S1_Seq1_11', 'S1_Seq1_12', 'S1_Seq1_13', 'S1_Seq2_0', 'S1_Seq2_1',
'S1_Seq2_2', 'S1_Seq2_3', 'S1_Seq2_4', 'S1_Seq2_5', 'S1_Seq2_6', 'S1_Seq2_7', 'S1_Seq2_8', 'S1_Seq2_9',
'S1_Seq2_10', 'S1_Seq2_11', 'S1_Seq2_12', 'S1_Seq2_13', 'S2_Seq1_0', 'S2_Seq1_1', 'S2_Seq1_2', 'S2_Seq1_3',
'S2_Seq1_4', 'S2_Seq1_5', 'S2_Seq1_6', 'S2_Seq1_7', 'S2_Seq1_8', 'S2_Seq1_9', 'S2_Seq1_10', 'S2_Seq1_11',
'S2_Seq1_12', 'S2_Seq1_13', 'S2_Seq2_0', 'S2_Seq2_1', 'S2_Seq2_2', 'S2_Seq2_3', 'S2_Seq2_4', 'S2_Seq2_5',
'S2_Seq2_6', 'S2_Seq2_7', 'S2_Seq2_8', 'S2_Seq2_9', 'S2_Seq2_10', 'S2_Seq2_11', 'S2_Seq2_12', 'S2_Seq2_13',
'S3_Seq1_0', 'S3_Seq1_1', 'S3_Seq1_2', 'S3_Seq1_3', 'S3_Seq1_4', 'S3_Seq1_5', 'S3_Seq1_6', 'S3_Seq1_7',
'S3_Seq1_8', 'S3_Seq1_9', 'S3_Seq1_10', 'S3_Seq1_11', 'S3_Seq1_12', 'S3_Seq1_13', 'S3_Seq2_0', 'S3_Seq2_1',
'S3_Seq2_2', 'S3_Seq2_3', 'S3_Seq2_4', 'S3_Seq2_5', 'S3_Seq2_6', 'S3_Seq2_7', 'S3_Seq2_8', 'S3_Seq2_9',
'S3_Seq2_10', 'S3_Seq2_11', 'S3_Seq2_12', 'S3_Seq2_13', 'S4_Seq1_0', 'S4_Seq1_1', 'S4_Seq1_2', 'S4_Seq1_3',
'S4_Seq1_4', 'S4_Seq1_5', 'S4_Seq1_6', 'S4_Seq1_7', 'S4_Seq1_8', 'S4_Seq1_9', 'S4_Seq1_10', 'S4_Seq1_11',
'S4_Seq1_12', 'S4_Seq1_13', 'S4_Seq2_0', 'S4_Seq2_1', 'S4_Seq2_2', 'S4_Seq2_3', 'S4_Seq2_4', 'S4_Seq2_5',
'S4_Seq2_6', 'S4_Seq2_7', 'S4_Seq2_8', 'S4_Seq2_9', 'S4_Seq2_10', 'S4_Seq2_11', 'S4_Seq2_12', 'S4_Seq2_13',
'S5_Seq1_0', 'S5_Seq1_1', 'S5_Seq1_2', 'S5_Seq1_3', 'S5_Seq1_4', 'S5_Seq1_5', 'S5_Seq1_6', 'S5_Seq1_7',
'S5_Seq1_8', 'S5_Seq1_9', 'S5_Seq1_10', 'S5_Seq1_11', 'S5_Seq1_12', 'S5_Seq1_13', 'S5_Seq2_0', 'S5_Seq2_1',
'S5_Seq2_2', 'S5_Seq2_3', 'S5_Seq2_4', 'S5_Seq2_5', 'S5_Seq2_6', 'S5_Seq2_7', 'S5_Seq2_8', 'S5_Seq2_9',
'S5_Seq2_10', 'S5_Seq2_11', 'S5_Seq2_12', 'S5_Seq2_13', 'S6_Seq1_0', 'S6_Seq1_1', 'S6_Seq1_2', 'S6_Seq1_3',
'S6_Seq1_4', 'S6_Seq1_5', 'S6_Seq1_6', 'S6_Seq1_7', 'S6_Seq1_8', 'S6_Seq1_9', 'S6_Seq1_10', 'S6_Seq1_11',
'S6_Seq1_12', 'S6_Seq1_13', 'S6_Seq2_0', 'S6_Seq2_1', 'S6_Seq2_2', 'S6_Seq2_3', 'S6_Seq2_4', 'S6_Seq2_5',
'S6_Seq2_6', 'S6_Seq2_7', 'S6_Seq2_8', 'S6_Seq2_9', 'S6_Seq2_10', 'S6_Seq2_11', 'S6_Seq2_12', 'S6_Seq2_13',
'S7_Seq1_0', 'S7_Seq1_1', 'S7_Seq1_2', 'S7_Seq1_3', 'S7_Seq1_4', 'S7_Seq1_5', 'S7_Seq1_6', 'S7_Seq1_7',
'S7_Seq1_8', 'S7_Seq1_9', 'S7_Seq1_10', 'S7_Seq1_11', 'S7_Seq1_12', 'S7_Seq1_13', 'S7_Seq2_0', 'S7_Seq2_1',
'S7_Seq2_2', 'S7_Seq2_3', 'S7_Seq2_4', 'S7_Seq2_5', 'S7_Seq2_6', 'S7_Seq2_7', 'S7_Seq2_8', 'S7_Seq2_9',
'S7_Seq2_10', 'S7_Seq2_11', 'S7_Seq2_12', 'S7_Seq2_13', 'S8_Seq1_0', 'S8_Seq1_1', 'S8_Seq1_2', 'S8_Seq1_3',
'S8_Seq1_4', 'S8_Seq1_5', 'S8_Seq1_6', 'S8_Seq1_7', 'S8_Seq1_8', 'S8_Seq1_9', 'S8_Seq1_10', 'S8_Seq1_11',
'S8_Seq1_12', 'S8_Seq1_13', 'S8_Seq2_0', 'S8_Seq2_1', 'S8_Seq2_2', 'S8_Seq2_3', 'S8_Seq2_4', 'S8_Seq2_5',
'S8_Seq2_6', 'S8_Seq2_7', 'S8_Seq2_8', 'S8_Seq2_9', 'S8_Seq2_10', 'S8_Seq2_11', 'S8_Seq2_12', 'S8_Seq2_13',
'TS1', 'TS3', 'TS4'
]
camera_params = dict()
for sbj in subjects:
if sbj.startswith('S'):
subject, seq, cid = sbj.split('_')
cid = int(cid)
camera_meta = dict()
camera_meta.update(mpii_3dhp_cameras_extrinsic_params[cid])
camera_meta.update(mpii_3dhp_cameras_intrinsic_params[cid])
camera_params[sbj] = [camera_meta]
if sbj.startswith('T'):
camera_meta = dict()
camera_meta.update(mpii_3dhp_cameras_extrinsic_params[8])
camera_meta.update(mpii_3dhp_cameras_intrinsic_params[8])
camera_params[sbj] = [camera_meta]
h36m_skeleton = Human36mSkeleton(
parents=[-1, 0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12,
16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30],
joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23],
joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31]
)
class Mpii3dhpDataset(MocapDataset):
def __init__(self, path, universal=False):
super().__init__(fps=50, skeleton=h36m_skeleton)
self.universal = universal
self._cameras = copy.deepcopy(camera_params)
for cameras in self._cameras.values():
for i, cam in enumerate(cameras):
for k, v in cam.items():
if k not in ['id', 'res_w', 'res_h']:
cam[k] = np.array(v, dtype='float32')
# Normalize camera frame
# if 'translation' in cam:
# cam['translation'] = cam['translation'] / 1000 # mm to meters
# DONE IN PREPARE
camera_info = dict()
for subject in self._cameras:
camera_info.setdefault(subject, list())
for cam in self._cameras[subject]:
if 'translation' not in cam:
continue
                K = np.eye(3, dtype=np.float64)
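                # Sketch of the likely continuation (the original file is
                # truncated here): fill the pinhole intrinsic matrix from the
                # per-camera metadata. The CameraInfoPacket construction that
                # presumably follows is omitted because its signature is not
                # shown in the original.
                K[0, 0], K[1, 1] = cam['focal_length']
                K[0, 2], K[1, 2] = cam['center']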
'''
Analysis utilities for Imfits.
'''
import numpy as np
import scipy.optimize
import matplotlib.pyplot as plt
from imfits import Imfits
# constant
clight = 2.99792458e10 # light speed [cm s^-1]
auTOkm = 1.495978707e8 # AU --> km
auTOcm = 1.495978707e13 # AU --> cm
auTOpc = 4.85e-6 # au --> pc
pcTOau = 2.06e5 # pc --> au
pcTOcm = 3.09e18 # pc --> cm
# Handle images
def boxtrim(self, xlim, ylim, coord_center=None):
'''
    Trim an image with a given box.
Parameters
----------
- xlim, ylim: x and y ranges in arcsec.
- coord_center: Coordinate of the box center.
In the format of '00h00m00.00s 00d00m00.00s' or 'hh:mm:ss.ss dd:mm:ss.ss'.
'''
# shift center
if coord_center:
#print ('Shift the map center.')
self.shift_coord_center(coord_center)
# get axes
xaxis, yaxis, vaxis, saxis = self.axes
xaxis *=3600. # deg --> arcsec
yaxis *=3600. # deg --> arcsec
# get pixel range
    xpix = np.where((xaxis >= xlim[0]) & (xaxis <= xlim[1]))[0]
    ypix = np.where((yaxis >= ylim[0]) & (yaxis <= ylim[1]))[0]
    xpixmin, xpixmax = xpix[0], xpix[-1]
    ypixmin, ypixmax = ypix[0], ypix[-1]
rex, rey = xaxis[xpixmin:xpixmax+1]/3600, yaxis[ypixmin:ypixmax+1]/3600 # deg
renx = xpixmax - xpixmin + 1
reny = ypixmax - ypixmin + 1
# trimming
if self.naxis == 2:
self.data = self.data[ypixmin:ypixmax+1,xpixmin:xpixmax+1]
self.naxis_i = renx, reny
elif self.naxis == 3:
self.data = self.data[:,ypixmin:ypixmax+1,xpixmin:xpixmax+1]
self.naxis_i = renx, reny, self.naxis_i[2]
elif self.naxis == 4:
self.data = self.data[:,:,ypixmin:ypixmax+1,xpixmin:xpixmax+1]
self.naxis_i = renx, reny, self.naxis_i[2], self.naxis_i[3]
else:
print ('ERROR\tboxtrim: Input image must have 2--4 axes.')
return
# update axes
self.nx = renx
self.ny = reny
self.xaxis = rex
self.yaxis = rey
self.axes = rex, rey, vaxis, saxis
self.xx = self.xx[ypixmin:ypixmax+1,xpixmin:xpixmax+1]
self.xx_wcs = self.xx_wcs[ypixmin:ypixmax+1,xpixmin:xpixmax+1]
self.yy = self.yy[ypixmin:ypixmax+1,xpixmin:xpixmax+1]
self.yy_wcs = self.yy_wcs[ypixmin:ypixmax+1,xpixmin:xpixmax+1]
# functions
# 2D linear function
def func_G93(del_ra, del_dec, v0, a, b):
# See Goodman et al. (1993)
vlsr = v0 + a*del_ra + b*del_dec
return vlsr
def _func_G93(xdata,*args):
del_ra, del_dec = xdata
#print (*args)
ans = func_G93(del_ra, del_dec, *args)
return ans
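# Minimal self-contained sketch (not in the original file): fit func_G93 to a
# synthetic velocity plane and recover the gradient coefficients. All values
# below are illustrative.
def _demo_G93_fit():
    rng = np.random.default_rng(0)
    ra, dec = np.meshgrid(np.linspace(-10., 10., 21), np.linspace(-10., 10., 21))
    # plane v0 + a*ra + b*dec plus a little noise
    vlsr = func_G93(ra, dec, 5.0, 0.02, -0.01) + rng.normal(0., 1e-3, ra.shape)
    xdata = np.vstack((ra.ravel(), dec.ravel()))
    popt, _ = scipy.optimize.curve_fit(_func_G93, xdata, vlsr.ravel(), p0=[0., 0., 0.])
    return popt  # approximately [5.0, 0.02, -0.01]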
def lnfit2d(cube, p0, rfit=None, dist=140.,
outname=None, outfig=True, axis=0):
'''
    Fit a two-dimensional linear function to a map by least-squares fitting.
    The input map is assumed to be a velocity map, and the fitted plane is
    interpreted as a velocity gradient, following the method of
    Goodman et al. (1993).
    cube: Imfits object containing the map to fit
'''
# check type
if type(cube) == Imfits:
pass
else:
print ('ERROR\tlnfit2d: Object type is not Imfits. Check the input.')
return
data = cube.data
xx = cube.xx
yy = cube.yy
naxis = cube.naxis
xx = xx*3600. # deg --> arcsec
yy = yy*3600.
# delta
dx = np.abs(xx[0,1] - xx[0,0])
dy = np.abs(yy[1,0] - yy[0,0])
#print (xx.shape)
# beam
bmaj, bmin, bpa = cube.beam # as, as, deg
# radius
rr = np.sqrt(xx*xx + yy*yy)
# check data axes
if naxis == 2:
pass
elif naxis == 3:
data = data[axis,:,:]
elif naxis == 4:
data = data[0,axis,:,:]
else:
        print ('ERROR\tlnfit2d: Input FITS shape is not supported.\
         It must have 2--4 axes. Check the shape of the FITS file.')
return
# Nyquist sampling
step = int(bmin/dx*0.5)
ny, nx = xx.shape
#print (step)
xx_fit = xx[0:ny:step, 0:nx:step]
yy_fit = yy[0:ny:step, 0:nx:step]
data_fit = data[0:ny:step, 0:nx:step]
#print (data_fit.shape)
if rfit:
where_fit = np.where(rr <= rfit)
data_fit = data[where_fit]
xx_fit = xx[where_fit]
yy_fit = yy[where_fit]
else:
data_fit = data
xx_fit = xx
yy_fit = yy
# exclude nan
xx_fit = xx_fit[~np.isnan(data_fit)]
yy_fit = yy_fit[~np.isnan(data_fit)]
data_fit = data_fit[~np.isnan(data_fit)]
#print (xx_fit)
# Ravel the meshgrids of X, Y points to a pair of 1-D arrays.
xdata = np.vstack((xx_fit, yy_fit)) # or xx.ravel()
# fitting
popt, pcov = scipy.optimize.curve_fit(_func_G93, xdata, data_fit, p0)
perr = np.sqrt(np.diag(pcov))
v0, a, b = popt
v0_err, a_err, b_err = perr
# velocity gradient
vgrad = (a*a + b*b)**0.5/dist/auTOpc # km s^-1 pc^-1
th_vgrad = np.arctan2(a,b) # radians
# error of vgrad through the error propagation
c01 = (a*a + b*b)**(-0.5)/dist/auTOpc
vgrad_err = c01*np.sqrt((a*a_err)*(a*a_err) + (b*b_err)*(b*b_err))
# error of th_vgrad through the error propagation
costh2 = np.cos(th_vgrad)*np.cos(th_vgrad)
    sinth2 = np.sin(th_vgrad)*np.sin(th_vgrad)
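    # Sketch of the remaining steps (the original file is truncated here): the
    # propagated uncertainty of th_vgrad = arctan2(a, b), written in a form
    # equivalent to the costh2/sinth2 expressions prepared above; the return
    # signature is assumed.
    th_vgrad_err = np.sqrt((b*a_err)**2 + (a*b_err)**2)/(a*a + b*b)
    return v0, v0_err, vgrad, vgrad_err, th_vgrad, th_vgrad_err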
r"""
Special Functions
.................
This following standard C99 math functions are available:
M_PI, M_PI_2, M_PI_4, M_SQRT1_2, M_E:
$\pi$, $\pi/2$, $\pi/4$, $1/\sqrt{2}$ and Euler's constant $e$
exp, log, pow(x,y), expm1, log1p, sqrt, cbrt:
Power functions $e^x$, $\ln x$, $x^y$, $e^x - 1$, $\ln 1 + x$,
$\sqrt{x}$, $\sqrt[3]{x}$. The functions expm1(x) and log1p(x)
are accurate across all $x$, including $x$ very close to zero.
sin, cos, tan, asin, acos, atan:
Trigonometry functions and inverses, operating on radians.
sinh, cosh, tanh, asinh, acosh, atanh:
Hyperbolic trigonometry functions.
atan2(y,x):
Angle from the $x$\ -axis to the point $(x,y)$, which is equal to
$\tan^{-1}(y/x)$ corrected for quadrant. That is, if $x$ and $y$ are
both negative, then atan2(y,x) returns a value in quadrant III where
atan(y/x) would return a value in quadrant I. Similarly for
quadrants II and IV when $x$ and $y$ have opposite sign.
fabs(x), fmin(x,y), fmax(x,y), trunc, rint:
Floating point functions. rint(x) returns the nearest integer.
NAN:
NaN, Not a Number, $0/0$. Use isnan(x) to test for NaN. Note that
you cannot use :code:`x == NAN` to test for NaN values since that
will always return false. NAN does not equal NAN! The alternative,
:code:`x != x` may fail if the compiler optimizes the test away.
INFINITY:
$\infty, 1/0$. Use isinf(x) to test for infinity, or isfinite(x)
to test for finite and not NaN.
erf, erfc, tgamma, lgamma: **do not use**
Special functions that should be part of the standard, but are missing
or inaccurate on some platforms. Use sas_erf, sas_erfc and sas_gamma
instead (see below). Note: lgamma(x) has not yet been tested.
Some non-standard constants and functions are also provided:
M_PI_180, M_4PI_3:
$\frac{\pi}{180}$, $\frac{4\pi}{3}$
SINCOS(x, s, c):
Macro which sets s=sin(x) and c=cos(x). The variables *c* and *s*
must be declared first.
square(x):
$x^2$
cube(x):
$x^3$
sas_sinx_x(x):
$\sin(x)/x$, with limit $\sin(0)/0 = 1$.
powr(x, y):
$x^y$ for $x \ge 0$; this is faster than general $x^y$ on some GPUs.
pown(x, n):
$x^n$ for $n$ integer; this is faster than general $x^n$ on some GPUs.
FLOAT_SIZE:
The number of bytes in a floating point value. Even though all
variables are declared double, they may be converted to single
precision float before running. If your algorithm depends on
precision (which is not uncommon for numerical algorithms), use
the following::
#if FLOAT_SIZE>4
... code for double precision ...
#else
... code for single precision ...
#endif
SAS_DOUBLE:
A replacement for :code:`double` so that the declared variable will
stay double precision; this should generally not be used since some
graphics cards do not support double precision. There is no provision
for forcing a constant to stay double precision.
The following special functions and scattering calculations are defined.
These functions have been tuned to be fast and numerically stable down
to $q=0$ even in single precision. In some cases they work around bugs
which appear on some platforms but not others, so use them where needed.
Add the files listed in :code:`source = ["lib/file.c", ...]` to your *model.py*
file in the order given, otherwise these functions will not be available.
polevl(x, c, n):
Polynomial evaluation $p(x) = \sum_{i=0}^n c_i x^i$ using Horner's
method so it is faster and more accurate.
$c = \{c_n, c_{n-1}, \ldots, c_0 \}$ is the table of coefficients,
sorted from highest to lowest.
p1evl(x, c, n):
Evaluate normalized polynomial $p(x) = x^n + \sum_{i=0}^{n-1} c_i x^i$
using Horner's method so it is faster and more accurate.
$c = \{c_{n-1}, c_{n-2} \ldots, c_0 \}$ is the table of coefficients,
sorted from highest to lowest.
sas_gamma(x):
Gamma function $\text{sas_gamma}(x) = \Gamma(x)$.
The standard math function, tgamma(x) is unstable for $x < 1$
on some platforms.
sas_gammaln(x):
log gamma function sas_gammaln\ $(x) = \log \Gamma(|x|)$.
The standard math function, lgamma(x), is incorrect for single
precision on some platforms.
sas_gammainc(a, x), sas_gammaincc(a, x):
Incomplete gamma function
sas_gammainc\ $(a, x) = \int_0^x t^{a-1}e^{-t}\,dt / \Gamma(a)$
and complementary incomplete gamma function
sas_gammaincc\ $(a, x) = \int_x^\infty t^{a-1}e^{-t}\,dt / \Gamma(a)$
sas_erf(x), sas_erfc(x):
Error function
$\text{sas_erf}(x) = \frac{2}{\sqrt\pi}\int_0^x e^{-t^2}\,dt$
and complementary error function
$\text{sas_erfc}(x) = \frac{2}{\sqrt\pi}\int_x^{\infty} e^{-t^2}\,dt$.
The standard math functions erf(x) and erfc(x) are slower and broken
on some platforms.
sas_J0(x):
Bessel function of the first kind $\text{sas_J0}(x)=J_0(x)$ where
$J_0(x) = \frac{1}{\pi}\int_0^\pi \cos(x\sin(\tau))\,d\tau$.
The standard math function j0(x) is not available on all platforms.
sas_J1(x):
Bessel function of the first kind $\text{sas_J1}(x)=J_1(x)$ where
$J_1(x) = \frac{1}{\pi}\int_0^\pi \cos(\tau - x\sin(\tau))\,d\tau$.
The standard math function j1(x) is not available on all platforms.
sas_JN(n, x):
Bessel function of the first kind and integer order $n$:
$\text{sas_JN}(n, x)=J_n(x)$ where
$J_n(x) = \frac{1}{\pi}\int_0^\pi \cos(n\tau - x\sin(\tau))\,d\tau$.
If $n$ = 0 or 1, it uses sas_J0(x) or sas_J1(x), respectively.
The standard math function jn(n, x) is not available on all platforms.
sas_Si(x):
Sine integral $\text{Si}(x) = \int_0^x \tfrac{\sin t}{t}\,dt$.
This function uses Taylor series for small and large arguments:
For large arguments,
.. math::
\text{Si}(x) \sim \frac{\pi}{2}
- \frac{\cos(x)}{x}
\left(1 - \frac{2!}{x^2} + \frac{4!}{x^4} - \frac{6!}{x^6} \right)
- \frac{\sin(x)}{x}
\left(\frac{1}{x} - \frac{3!}{x^3} + \frac{5!}{x^5} - \frac{7!}{x^7}\right)
For small arguments,
.. math::
\text{Si}(x) \sim x
- \frac{x^3}{3\times 3!} + \frac{x^5}{5 \times 5!} - \frac{x^7}{7 \times 7!}
+ \frac{x^9}{9\times 9!} - \frac{x^{11}}{11\times 11!}
sas_3j1x_x(x):
Spherical Bessel form
$\text{sph_j1c}(x) = 3 j_1(x)/x = 3 (\sin(x) - x \cos(x))/x^3$,
with a limiting value of 1 at $x=0$, where $j_1(x)$ is the spherical
Bessel function of the first kind and first order.
This function uses a Taylor series for small $x$ for numerical accuracy.
sas_2J1x_x(x):
Bessel form $\text{sas_J1c}(x) = 2 J_1(x)/x$, with a limiting value
of 1 at $x=0$, where $J_1(x)$ is the Bessel function of first kind
and first order.
gauss76.n, gauss76.z[i], gauss76.w[i]:
Points $z_i$ and weights $w_i$ for 76-point Gaussian quadrature, respectively,
computing $\int_{-1}^1 f(z)\,dz \approx \sum_{i=1}^{76} w_i\,f(z_i)$.
When translating the model to C, include 'lib/gauss76.c' in the source
and use :code:`GAUSS_N`, :code:`GAUSS_Z`, and :code:`GAUSS_W`.
Similar arrays are available in :code:`gauss20` for 20-point quadrature
and :code:`gauss150.c` for 150-point quadrature. By using
:code:`import gauss76 as gauss` it is easy to change the number of
points in the integration.
"""
# pylint: disable=unused-import
import numpy as np
# Functions to add to our standard set
from numpy import degrees, radians
# C99 standard math library functions
from numpy import exp, log, power as pow, expm1, log1p, sqrt, cbrt
from numpy import sin, cos, tan, arcsin as asin, arccos as acos, arctan as atan
from numpy import sinh, cosh, tanh, arcsinh as asinh, arccosh as acosh, arctanh as atanh
from numpy import arctan2 as atan2
from numpy import fabs, fmin, fmax, trunc, rint
from numpy import pi, nan, inf
from scipy.special import gamma as sas_gamma
from scipy.special import gammaln as sas_gammaln
from scipy.special import gammainc as sas_gammainc
from scipy.special import gammaincc as sas_gammaincc
from scipy.special import erf as sas_erf
from scipy.special import erfc as sas_erfc
from scipy.special import j0 as sas_J0
from scipy.special import j1 as sas_J1
from scipy.special import jn as sas_JN
# erf, erfc, tgamma, lgamma **do not use**
# C99 standard math constants
M_PI, M_PI_2, M_PI_4, M_SQRT1_2, M_E = np.pi, np.pi/2, np.pi/4, np.sqrt(0.5), np.e
NAN = nan
INFINITY = inf
# non-standard constants
M_PI_180, M_4PI_3 = M_PI/180, 4*M_PI/3
# can't do SINCOS in python; use "s, c = SINCOS(x)" instead
def SINCOS(x):
"""return sin(x), cos(x)"""
return sin(x), cos(x)
sincos = SINCOS
def square(x):
"""return x^2"""
return x*x
def cube(x):
"""return x^3"""
return x*x*x
def sas_sinx_x(x):
"""return sin(x)/x"""
from numpy import sinc as _sinc
return _sinc(x/M_PI)
def powr(x, y):
"""return x^y for x>0"""
return x**y
def pown(x, n):
"""return x^n for n integer"""
return x**n
FLOAT_SIZE = 8
def polevl(x, c, n):
"""return p(x) for polynomial p of degree n-1 with coefficients c"""
return np.polyval(c[:n], x)
def p1evl(x, c, n):
"""return x^n + p(x) for polynomial p of degree n-1 with coefficients c"""
return np.polyval(np.hstack(([1.], c))[:n], x)
def sas_Si(x):
"""return Si(x)"""
from scipy.special import sici
return sici(x)[0]
def sas_j1(x):
"""return j1(x)"""
if np.isscalar(x):
retvalue = (sin(x) - x*cos(x))/x**2 if x != 0. else 0.
else:
with np.errstate(all='ignore'):
retvalue = (sin(x) - x*cos(x))/x**2
retvalue[x == 0.] = 0.
return retvalue
def sas_3j1x_x(x):
"""return 3*j1(x)/x"""
if np.isscalar(x):
retvalue = 3*(sin(x) - x*cos(x))/x**3 if x != 0. else 1.
else:
with np.errstate(all='ignore'):
            retvalue = 3*(sin(x) - x*cos(x))/x**3
        retvalue[x == 0.] = 1.
    return retvalue
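# Quick sanity checks (illustrative additions, not part of the original module):
if __name__ == "__main__":
    assert abs(sas_3j1x_x(0.0) - 1.0) < 1e-12                 # limiting value at x = 0
    assert abs(polevl(2.0, [1., 0., -1.], 3) - 3.0) < 1e-12   # x^2 - 1 at x = 2
    s, c = SINCOS(0.5)
    assert abs(s*s + c*c - 1.0) < 1e-12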
import pytest
from pathlib import Path
import pickle
import numpy as np
import pandas as pd
from scipy import sparse as sp
from scipy.stats import mannwhitneyu
from numpy.random import negative_binomial, binomial, seed
from anndata import AnnData
from scanpy.tools import rank_genes_groups
from scanpy.tools._rank_genes_groups import _RankGenes
from scanpy.get import rank_genes_groups_df
from scanpy.datasets import pbmc68k_reduced
from scanpy._utils import select_groups
HERE = Path(__file__).parent / Path('_data/')
# We test results for a simple generic example
# Tests are conducted for sparse and non-sparse AnnData objects.
# Due to minor changes in multiplication implementation for sparse and non-sparse objects,
# results differ (very) slightly
def get_example_data(*, sparse=False):
# create test object
adata = AnnData(np.multiply(binomial(1, 0.15, (100, 20)), negative_binomial(2, 0.25, (100, 20))))
    # adapt marker_genes for cluster (so as to have some form of reasonable input)
adata.X[0:10, 0:5] = np.multiply(binomial(1, 0.9, (10, 5)), negative_binomial(1, 0.5, (10, 5)))
# The following construction is inefficient, but makes sure that the same data is used in the sparse case
if sparse:
adata.X = sp.csr_matrix(adata.X)
# Create cluster according to groups
adata.obs['true_groups'] = pd.Categorical(np.concatenate((
np.zeros((10,), dtype=int),
np.ones((90,), dtype=int),
)))
return adata
def get_true_scores():
with Path(HERE, 'objs_t_test.pkl').open('rb') as f:
true_scores_t_test, true_names_t_test = pickle.load(f)
with Path(HERE, 'objs_wilcoxon.pkl').open('rb') as f:
true_scores_wilcoxon, true_names_wilcoxon = pickle.load(f)
return true_names_t_test, true_names_wilcoxon,\
true_scores_t_test, true_scores_wilcoxon
def test_results_dense():
seed(1234)
adata = get_example_data()
true_names_t_test, true_names_wilcoxon,\
true_scores_t_test, true_scores_wilcoxon = get_true_scores()
rank_genes_groups(adata, 'true_groups', n_genes=20, method='t-test')
adata.uns['rank_genes_groups']['names'] = adata.uns['rank_genes_groups']['names'].astype(true_names_t_test.dtype)
for name in true_scores_t_test.dtype.names:
assert np.allclose(true_scores_t_test[name], adata.uns['rank_genes_groups']['scores'][name])
assert np.array_equal(true_names_t_test, adata.uns['rank_genes_groups']['names'])
rank_genes_groups(adata, 'true_groups', n_genes=20, method='wilcoxon')
adata.uns['rank_genes_groups']['names'] = adata.uns['rank_genes_groups']['names'].astype(true_names_wilcoxon.dtype)
for name in true_scores_t_test.dtype.names:
assert np.allclose(true_scores_wilcoxon[name][:7], adata.uns['rank_genes_groups']['scores'][name][:7])
assert np.array_equal(true_names_wilcoxon[:7], adata.uns['rank_genes_groups']['names'][:7])
def test_results_sparse():
seed(1234)
adata = get_example_data(sparse=True)
true_names_t_test, true_names_wilcoxon,\
true_scores_t_test, true_scores_wilcoxon = get_true_scores()
rank_genes_groups(adata, 'true_groups', n_genes=20, method='t-test')
adata.uns['rank_genes_groups']['names'] = adata.uns['rank_genes_groups']['names'].astype(true_names_t_test.dtype)
for name in true_scores_t_test.dtype.names:
        assert np.allclose(true_scores_t_test[name], adata.uns['rank_genes_groups']['scores'][name])
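    # Sketch of the likely continuation (the original file is truncated here),
    # mirroring test_results_dense above:
    assert np.array_equal(true_names_t_test, adata.uns['rank_genes_groups']['names'])
    rank_genes_groups(adata, 'true_groups', n_genes=20, method='wilcoxon')
    adata.uns['rank_genes_groups']['names'] = adata.uns['rank_genes_groups']['names'].astype(true_names_wilcoxon.dtype)
    for name in true_scores_wilcoxon.dtype.names:
        assert np.allclose(true_scores_wilcoxon[name][:7], adata.uns['rank_genes_groups']['scores'][name][:7])
    assert np.array_equal(true_names_wilcoxon[:7], adata.uns['rank_genes_groups']['names'][:7])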
name = "attitudes"
''' Attitude Control Module
Ok, so the principle behind this module is the base class "att", which represents an attitude
description, by default of type "DCM". Can also be created from Euler Angles, PRV, Quaternions,
CRPs, and MRPs. Can also be transformed into these others by a method as well.
For simplicity's sake, I'm going to treat these classes as kind of a "dual number" where the DCM
representation is stored, but for all other types, the representation of that type is also stored.
This should allow for direct quaternion addition and so forth.
This should also allow me to simplify the addition/subtraction functions into a single function,
that read the types of the inputs and acts accordingly.
There will probably also be an angular acceleration vector class, but I'll get there when I get
there.
Author: <NAME>
'''
#standard imports
import numpy as np
from numpy import linalg as LA
# ------------------------------------------------------------------------------------------------
# CONSTANTS
# ------------------------------------------------------------------------------------------------
# Nothing here yet
# -----------------------------------------------------------------------------------------------
# BASE CLASS "ATT"
# -----------------------------------------------------------------------------------------------
class att():
''' Attitude Description Class
Defines an attitude, by default from a DCM description. Also contains a whole bunch of class
methods for defining by other means (CRP, quaternions, etc).
Arguments:
    DCM: (ndarray [3x3]) General 3x3 DCM of the attitude description
'''
def __init__(self, DCM, type='DCM', angle_vec=np.array([]),units='rad',
euler_type=None,phi=None,path=None):
''' Standard Definition from a DCM '''
if np.max(np.abs((DCM.T @ DCM) - np.eye(3))) > 1e-3:
raise ValueError('DCM doesn\'t appear to be orthonormal')
self.DCM = DCM
self.type = type
self.units = units
if euler_type:
self.order = euler_type
        if np.size(angle_vec) > 0:
self.vec = angle_vec
if phi:
self.phi = phi
if path:
self.path = path
def __repr__(self):
if self.type == 'DCM':
return 'DCM Attitude description is \n {}'.format(self.DCM)
elif self.type == 'PRV':
statement = ''' \n
{} Attitude description is: \n e = {} \n Phi = {} {} \n
\n DCM description is: \n {} \n
'''
return statement.format(self.type,list(self.vec),self.phi,self.units,self.DCM)
elif self.type == 'Euler Angle':
statement = '\n {} {} Attitude description is: \n {} {} \n \n DCM description is: \n {} \n'
return statement.format(self.order,self.type,list(self.vec),self.units,self.DCM)
else:
statement = '\n {} Attitude description is: \n {} \n \n DCM description is: \n {} \n'
return statement.format(self.type,np.array(self.vec).flatten(),self.DCM)
@classmethod
def _from_eul_ang(cls,type,ang1,ang2,ang3,units='deg'):
''' Definition from Euler Angles
Takes a type, 3 angles, and units to determine a DCM, then records both sets
Arguments:
type: (int) int of order of rotation axes
ang1: (float) angle of rotation about first axis
ang2: (float) angle of rotation about second axis
ang3: (float) angle of rotation about third axis
units: (string) either 'rad' or 'deg'
'''
if units=='deg':
ang1, ang2, ang3 = np.radians(ang1),np.radians(ang2),np.radians(ang3)
if type not in (123,132,213,231,312,321,131,121,212,232,313,323):
raise ValueError('Euler angle type definition is incorrect')
angle_vec = np.array([ang1,ang2,ang3])
type = str(type)
DCM = eul_to_DCM(int(type[0]),ang1,int(type[1]),ang2,int(type[2]),ang3,'rad')
if units=='deg':
angle_vec = np.degrees(angle_vec)
return cls(DCM,'Euler Angle',angle_vec=angle_vec,units=units,euler_type=type)
@classmethod
def _from_PRV(cls,vec,phi=None,units='rad'):
''' Definition from Principle Rotation Vector
Takes either a vector with norm != 1 or a normalized vector and a phi rotation magnitude
Internally, the normalized vector and the phi rotation are used
Arguments:
vec: (list) principle rotation vector
phi: (float) optional, rotation magnitude
units: (string) either 'rad' or 'deg' to specify units for phi
'''
if not phi:
phi = LA.norm(vec)
vec = vec/LA.norm(vec)
if units=='deg':
phi = np.radians(phi)
e1,e2,e3 = vec
sigma = 1 - np.cos(phi)
cphi = np.cos(phi)
sphi = np.sin(phi)
C = np.array([[e1*e1*sigma+cphi,e1*e2*sigma+e3*sphi,e1*e3*sigma - e2*sphi],
[e2*e1*sigma - e3*sphi,e2**2*sigma+cphi,e2*e3*sigma+e1*sphi],
[e3*e1*sigma+e2*sphi,e3*e2*sigma-e1*sphi,e3**2*sigma+cphi]])
if units=='deg':
phi = np.degrees(phi)
return cls(C,'PRV', units=units, angle_vec=np.array(vec), phi=phi)
@classmethod
def _from_quat(cls,vec):
'''Definition from Quaternions
Takes in a quaternion and spits out an attitude object (DCM). Checks first for a valid
quaternion
Arguments:
vec: (list) of quaternion values
'''
if np.abs(LA.norm(vec)-1) > 1e-13:
raise ValueError('Quaternions must have norm of 1')
b0,b1,b2,b3 = vec
C = np.array([[b0**2+b1**2-b2**2-b3**2, 2*(b1*b2+b0*b3), 2*(b1*b3-b0*b2)],
[2*(b1*b2-b0*b3), b0**2-b1**2+b2**2-b3**2, 2*(b2*b3+b0*b1)],
[2*(b1*b3+b0*b2), 2*(b2*b3-b0*b1), b0**2-b1**2-b2**2+b3**2]])
return cls(C,'Quaternion', angle_vec=vec)
@classmethod
def _from_CRP(cls,vec):
'''Definition from Classical Rodriguez Parameters
Uses the vector definition of the DCM to convert CRPs into a valid attitude object (element
option also available in comments)
Arguments:
vec: (list) of CRP values
'''
q = np.atleast_2d(vec).reshape(3,1)
C = (1/(1+q.T@q))*((1-q.T@q)*np.eye(3) + 2 * q @ q.T - 2 * tilde(q))
# q1,q2,q3 = q.reshape(np.size(vec))
# C = np.array([[1+q1**2-q2**2-q3**2, 2*(q1*q2+q3), 2*(q1*q3-q2)],
# [2*(q1*q2-q3), 1-q1**2+q2**2-q3**2, 2*(q2*q3+q1)],
# [2*(q1*q3+q2), 2*(q2*q3-q1), 1-q1**2-q2**2+q3**2]])
# C = (1/(1 + q.T @ q)) * C
return cls(C,'CRP',angle_vec=np.array(vec))
@classmethod
def _from_MRP(cls,vec):
'''Definition from Modified Rodriguez Parameters
Uses the vector definition of the DCM to convert MRPs into a valid attitude object. Returns
the path whether it's long (norm > 1) or short (norm < 1) with norm==1 taken to be short
Arguments:
vec: (list) of MRP values
'''
s = np.atleast_2d(vec).T
C = np.eye(3) + (8*tilde(s)@tilde(s) - 4*(1-s.T@s)*tilde(s))/(1+s.T@s)**2
if LA.norm(vec) > 1:
path = 'long'
else:
path = 'short'
return cls(C,'MRP',angle_vec=np.array(vec),path=path)
def _to_eul_ang(self,type,units='deg'):
'''Conversion to Euler Angles. There's no easy way to do this, so it's always just done
from the DCM. Which is fine, it's still quick.
Arguments:
type: (int) currently must be 321 or 313 since those are common. Will expand
units: (str) optional, units to output the angles
'''
C = self.DCM
if type == 321:
ang1 = np.arctan2(C[0,1],C[0,0])
ang2 = -np.arcsin(C[0,2])
ang3 = np.arctan2(C[1,2],C[2,2])
elif type == 313:
ang1 = np.arctan2(C[2,0],-C[2,1])
ang2 = np.arccos(C[2,2])
ang3 = np.arctan2(C[0,2],C[1,2])
if units == 'deg':
ang1,ang2,ang3 = np.degrees([ang1,ang2,ang3])
return self._from_eul_ang(type,ang1,ang2,ang3,units=units)
def _to_PRV(self, units='rad'):
'''Conversion to Principle Rotation Vector. Always done from the DCM. Doesn't need to
take any arguments
Outputs the short version of the PRV (using arccos function) and the positive output
for e_hat
'''
C = self.DCM
phi = np.arccos(0.5*(C[0,0]+C[1,1]+C[2,2]-1))
e = (1/(2*np.sin(phi)))*np.array([C[1,2]-C[2,1],C[2,0]-C[0,2],C[0,1]-C[1,0]])
if units=='deg':
phi = np.degrees(phi)
return self._from_PRV(e,phi=phi,units=units)
def _to_quat(self, path='short'):
'''If the object is a classical or modified Rodriguez parameter object, directly converts
        to quaternions via known relations. Otherwise, uses Sheppard's method to determine the
quaternions from the DCM.
Arguments:
path: (str) optional, tells the function whether you'd like the short way or the
long way
'''
if self.type == 'CRP':
q = self.vec
b0 = 1/np.sqrt(1+LA.norm(q)**2)
b1 = q[0]*b0
b2 = q[1]*b0
b3 = q[2]*b0
elif self.type == 'MRP':
s = self.vec
b0 = (1-LA.norm(s)**2)/(1+LA.norm(s)**2)
b1 = 2*s[0]/(1+LA.norm(s)**2)
b2 = 2*s[1]/(1+LA.norm(s)**2)
b3 = 2*s[2]/(1+LA.norm(s)**2)
else:
#the annoying way...
C = self.DCM
[[C11,C12,C13],
[C21,C22,C23],
[C31,C32,C33]] = C
trC = C[0,0]+C[1,1]+C[2,2]
b02 = 0.25*(1+trC)
b12 = 0.25*(1+2*C[0,0]-trC)
b22 = 0.25*(1+2*C[1,1]-trC)
b32 = 0.25*(1+2*C[2,2]-trC)
b0b1 = (C23 - C32)/4
b0b2 = (C31 - C13)/4
b0b3 = (C12 - C21)/4
b1b2 = (C12 + C21)/4
b3b1 = (C31 + C13)/4
b2b3 = (C23 + C32)/4
squares = [b02,b12,b22,b32]
if b02 == np.max(squares):
b0 = np.sqrt(b02)
b1 = b0b1/b0
b2 = b0b2/b0
b3 = b0b3/b0
elif b12 == np.max(squares):
b1 = np.sqrt(b12)
b0 = b0b1/b1
b2 = b1b2/b1
b3 = b3b1/b1
        elif b22 == np.max(squares):
            b2 = np.sqrt(b22)
            b0 = b0b2/b2
            b1 = b1b2/b2
            b3 = b2b3/b2
        else:
            b3 = np.sqrt(b32)
            b0 = b0b3/b3
            b1 = b3b1/b3
            b2 = b2b3/b3
        # the original file is truncated here; returning via _from_quat is assumed
        return self._from_quat(np.array([b0, b1, b2, b3]))
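# Illustrative round-trip check (assumed, not in the original file); uses only
# the classmethods defined above, with a unit principal rotation axis.
if __name__ == '__main__':
    a = att._from_PRV([0., 0., 1.], phi=30., units='deg')
    print(a._to_quat())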
######################################################################################
#
# Authors : <NAME>, <NAME>
# KTH
# Email : <EMAIL>, <EMAIL>
#
# computations.py: implements, for the SEM, necessary computational methods
#####################################################################################
import numpy as np
import networkx as nx
# define each letter of alphabet as a vector
nuc_vec = {'A': [1., 0., 0., 0.], 'C': [0., 1., 0., 0.], 'G': [0., 0., 1., 0.], 'T': [0., 0., 0., 1.]}
alphabet_size = 4 # number of letters in alphabet
# compute upward messages
def compute_up_messages(data, tree, evo_model):
n_leaves, n_sites = data.shape
root = len(tree) - 1
# store up message for each node internal+external = 2n-1
up_table = np.ones((len(tree), alphabet_size, n_sites))
for i in range(n_leaves):
up_table[i] = np.transpose([nuc_vec[c] for c in data[i]])
for node in nx.dfs_postorder_nodes(tree, root):
if not tree.nodes[node]['type'] == 'leaf':
for child in tree.nodes[node]['children']:
t_child = tree.nodes[child]['t']
trans_matrix = evo_model.trans_matrix(t_child)
temp_table = np.dot(trans_matrix, up_table[child])
up_table[node] = np.multiply(up_table[node], temp_table)
return up_table
# compute down messages
def compute_down_messages(data, tree, evo_model, up_table):
n_leaves, n_sites = data.shape
root = len(tree) - 1
# store down message for each node intrenal+external = 2n-1
down_table = np.ones((2 * n_leaves - 1, alphabet_size, n_sites))
for node in nx.dfs_preorder_nodes(tree, root):
if not node == root:
parent = tree.nodes[node]['parent']
if parent == root:
parent_factor = down_table[root]
else:
t_parent = tree.nodes[parent]['t']
trans_matrix = evo_model.trans_matrix(t_parent)
parent_factor = np.dot(trans_matrix, down_table[parent])
# compute sibling factor (there is only one sibling for a tree)
for child in tree.nodes[parent]['children']:
if child != node:
t_child = tree.nodes[child]['t']
trans_matrix = evo_model.trans_matrix(t_child)
sibling_factor = np.dot(trans_matrix, up_table[child])
            down_table[node] = np.multiply(parent_factor, sibling_factor)
    return down_table
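# Illustrative helper (assumed, not part of the original file): per-node,
# per-site posteriors combine the up and down messages, normalized over the
# alphabet axis; any root prior is taken to be folded in upstream.
def compute_marginals(up_table, down_table):
    post = up_table * down_table
    return post / post.sum(axis=1, keepdims=True)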
import numpy as np
def i2t(sims, npts=None, return_ranks=False):
"""
Images->Text (Image Annotation)
sims: (N, 5N) matrix of similarity im-cap
"""
npts = sims.shape[0]
ranks = np.zeros(npts)
top1 = np.zeros(npts)
for index in range(npts):
inds = np.argsort(sims[index])[::-1]
# Score
rank = 1e20
for i in range(5 * index, 5 * index + 5, 1):
tmp = np.where(inds == i)[0][0]
if tmp < rank:
rank = tmp
ranks[index] = rank
top1[index] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
medr = np.floor(np.median(ranks)) + 1
meanr = ranks.mean() + 1
if return_ranks:
return (r1, r5, r10, medr, meanr), (ranks, top1)
else:
return (r1, r5, r10, medr, meanr)
def t2i(sims, npts=None, return_ranks=False):
"""
Text->Images (Image Search)
sims: (N, 5N) matrix of similarity im-cap
"""
npts = sims.shape[0]
ranks = np.zeros(5 * npts)
top1 = np.zeros(5 * npts)
# --> (5N(caption), N(image))
sims = sims.T
for index in range(npts):
for i in range(5):
inds = np.argsort(sims[5 * index + i])[::-1]
ranks[5 * index + i] = np.where(inds == index)[0][0]
top1[5 * index + i] = inds[0]
# Compute metrics
r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
    r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
    medr = np.floor(np.median(ranks)) + 1
    meanr = ranks.mean() + 1
    if return_ranks:
        return (r1, r5, r10, medr, meanr), (ranks, top1)
    else:
        return (r1, r5, r10, medr, meanr)
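# Illustrative usage (not in the original file): random similarity scores for
# 4 images x 20 captions stand in for the output of a trained model.
if __name__ == "__main__":
    demo_sims = np.random.rand(4, 20)
    print("i2t (r1, r5, r10, medr, meanr):", i2t(demo_sims))
    print("t2i (r1, r5, r10, medr, meanr):", t2i(demo_sims))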
import numpy as np
class Sum_Tree(object):
def __init__(self, capacity):
"""
        capacity = 5 sets the replay-buffer size.
        tree = [0,1,2,3,4,5,6,7,8,9,10,11,12]: indices 8-12 hold the leaf p values,
        indices 1-7 hold the parent/root running sums, and index 0 holds the number
        of tree nodes.
        data = [0,1,2,3,4,5]: indices 1-5 hold the transitions; index 0 holds capacity.
Tree structure and array storage:
Tree index:
1 -> storing priority sum
/ \
2 3
/ \ / \
4 5 6 7
/ \ / \ / \ / \
8 9 10 11 12 -> storing priority for transitions
"""
        assert capacity > 0, 'capacity must be larger than zero'
self.now = 0
self.capacity = capacity
self.parent_node_count = self.get_parent_node_count(capacity)
# print(self.parent_node_count)
self.tree = np.zeros(self.parent_node_count + capacity + 1)
self.tree[0] = len(self.tree) - 1
self.data = np.zeros(capacity + 1, dtype=object)
self.data[0] = capacity
def add(self, p, data):
"""
        p : priority
data : [s, a, r, s_, done]
"""
idx = self.now + 1
self.data[idx] = data
tree_index = idx + self.parent_node_count
self._updatetree(tree_index, p)
if idx >= self.capacity:
self.now = 0
else:
self.now = idx
def add_batch(self, p, data):
"""
p : property
data : [s, a, r, s_, done]
"""
num = len(data)
idx = (np.arange(num) + self.now) % self.capacity + 1
self.data[idx] = data
tree_index = idx + self.parent_node_count
self._updatetree_batch(tree_index, p)
if idx[-1] >= self.capacity:
self.now = 0
else:
self.now = idx[-1]
def _updatetree(self, tree_index, p):
diff = p - self.tree[tree_index]
self._propagate(tree_index, diff)
self.tree[tree_index] = p
def _updatetree_batch(self, tree_index, p):
diff = p - self.tree[tree_index]
sort_index = np.argsort(tree_index)
tree_index = np.sort(tree_index)
diff = diff[sort_index]
self._propagate_batch(tree_index, diff)
self.tree[tree_index] = p
def _propagate(self, tree_index, diff):
parent = tree_index // 2
self.tree[parent] += diff
if parent != 1:
self._propagate(parent, diff)
def _propagate_batch(self, tree_index, diff):
parent = tree_index // 2
_parent, idx1, count = np.unique(parent, return_index=True, return_counts=True)
_, idx2 = np.unique(parent[::-1], return_index=True)
diff = (diff[len(diff) - 1 - idx2] + diff[idx1]) * count / 2
self.tree[_parent] += diff
if (_parent != 1).all():
self._propagate_batch(_parent, diff)
def get(self, seg_p_total):
"""
seg_p_total : The value of priority to sample
"""
tree_index = self._retrieve(1, seg_p_total)
data_index = tree_index - self.parent_node_count
return (tree_index, data_index, self.tree[tree_index], self.data[data_index])
def get_batch(self, ps):
assert isinstance(ps, (list, np.ndarray))
tidx, didx, p, d = zip(*[self.get(i) for i in ps])
tidx, didx, p, d = map(np.asarray, [tidx, didx, p, d])
d = [np.asarray(e) for e in zip(*d)] # [[s, a], [s, a]] => [[s, s], [a, a]]
return (tidx, didx, p, d)
def get_batch_parallel(self, ps):
assert isinstance(ps, (list, np.ndarray))
init_idx = np.full(len(ps), 1)
tidx = self._retrieve_batch(init_idx, ps)
didx = tidx - self.parent_node_count
p = self.tree[tidx]
d = self.data[didx]
tidx, didx, p, d = map(np.asarray, [tidx, didx, p, d])
d = [np.asarray(e) for e in zip(*d)] # [[s, a], [s, a]] => [[s, s], [a, a]]
return (tidx, didx, p, d)
def _retrieve(self, tree_index, seg_p_total):
left = 2 * tree_index
right = left + 1
# if index 0 is the root node
# left = 2 * tree_index + 1
# right = 2 * (tree_index + 1)
if left >= self.tree[0]:
return tree_index
return self._retrieve(left, seg_p_total) if seg_p_total <= self.tree[left] else self._retrieve(right, seg_p_total - self.tree[left])
def _retrieve_batch(self, tree_index, seg_p_total):
left = 2 * tree_index
right = left + 1
if (left >= self.tree[0]).all():
return tree_index
index = np.where(self.tree[left] >= seg_p_total, left, 0) + np.where(self.tree[left] < seg_p_total, right, 0)
seg_p_total = np.where(self.tree[left] >= seg_p_total, seg_p_total, 0) + np.where(self.tree[left] < seg_p_total, seg_p_total - self.tree[left], 0)
return self._retrieve_batch(index, seg_p_total)
def pp(self):
print(self.tree, self.data)
@property
def total(self):
return self.tree[1]
def get_parent_node_count(self, capacity):
i = 0
while True:
if pow(2, i) < capacity <= pow(2, i + 1):
return pow(2, i + 1) - 1
i += 1
if __name__ == "__main__":
from time import time
x = 0
t = 1000
for i in range(t):
tree = Sum_Tree(524288)
        a = np.arange(50000)
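        # Illustrative continuation (assumed; the original file is truncated
        # here): time a bulk insert of 50k transitions with unit priorities.
        start = time()
        tree.add_batch(np.ones(50000), a)
        x += time() - start
    print('mean add_batch seconds:', x / t)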
import os
import csv
import glob
import numpy as np
import collections
import pandas as pd
# Hold the prices for the small data frame
Prices = collections.namedtuple(
"Prices", field_names=["open", "high", "low", "close", "volume"]
)
# Hold the prices for the large data frame
PricesL = collections.namedtuple(
"Prices",
field_names=[
"open",
"high",
"low",
"close",
"volume",
"vwap",
"histogram",
"macd",
"signal",
"rsi",
"bbands",
"ma10",
"ma20",
"ma50",
],
)
# Helper function for creating the Prices-tuple
def get_tuple_from_df(df, large=False):
if large:
        for i in (10, 20, 50):  # moving-average windows
df[f"ma{i}"] = df.rolling(window=i)["close"].mean()
df = df.dropna()
return PricesL(
open=np.array(df["open"]),
high=np.array(df["high"]),
low=np.array(df["low"]),
close=np.array(df["close"]),
volume=np.array(df["Volume"]),
vwap=np.array(df["VWAP"]),
histogram=np.array(df["Histogram"]),
macd=np.array(df["MACD"]),
signal=np.array(df["Signal"]),
            rsi=np.array(df["RSI"]),
            bbands=np.array(df["BBands"]),  # column name assumed; original truncated here
            ma10=np.array(df["ma10"]),
            ma20=np.array(df["ma20"]),
            ma50=np.array(df["ma50"]),
        )
# -----------------------------------
#
# 01_02 线性回归
#
# -----------------------------------
import numpy as np
import tensorflow as tf
tf.enable_eager_execution()
# -----------------------------------
# 1. 数据
# -----------------------------------
X_raw = np.array([2013, 2014, 2015, 2016, 2017], dtype=np.float32)
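# Illustrative continuation (values assumed, not from the original): matching
# targets and min-max normalization before running gradient descent.
y_raw = np.array([12000, 14000, 15000, 16500, 17500], dtype=np.float32)
X = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())
y = (y_raw - y_raw.min()) / (y_raw.max() - y_raw.min())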
# Copyright © 2019. <NAME>. All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
import math
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist
from scipy.stats import chi2
from scipy.ndimage.filters import gaussian_filter1d
from .utils import Epoch
from .utils import printProgressBar, get_spike_depths
def calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, params, \
cluster_ids=None, epochs = None, seed=0):
""" Calculate metrics for all units on one probe
Inputs:
------
spike_times : numpy.ndarray (num_spikes x 0)
Spike times in seconds (same timebase as epochs)
spike_clusters : numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind : numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
epochs : list of Epoch objects
contains information on Epoch start and stop times
params : dict of parameters
'isi_threshold' : minimum time for isi violations
'min_isi'
'num_channels_to_compare'
'max_spikes_for_unit'
'max_spikes_for_nn'
'n_neighbors'
'drift_metrics_interval_s'
'drift_metrics_min_spikes_per_interval'
Outputs:
--------
metrics : pandas.DataFrame
one column for each metric
one row per unit per epoch
"""
metrics = pd.DataFrame()
if epochs is None:
epochs = [Epoch('complete_session', 0, np.inf)]
total_units = np.max(spike_clusters) + 1
total_epochs = len(epochs)
for epoch in epochs:
in_epoch = np.logical_and(spike_times > epoch.start_time, spike_times < epoch.end_time)
spikes_in_epoch = np.sum(in_epoch)
spikes_for_nn = min(spikes_in_epoch, params['max_spikes_for_nn'])
spikes_for_silhouette = min(spikes_in_epoch, params['n_silhouette'])
print("Calculating isi violations")
isi_viol = calculate_isi_violations(spike_times[in_epoch], spike_clusters[in_epoch], total_units, params['isi_threshold'], params['min_isi'])
print("Calculating presence ratio")
presence_ratio = calculate_presence_ratio(spike_times[in_epoch], spike_clusters[in_epoch], total_units)
print("Calculating firing rate")
firing_rate, num_spikes = calculate_firing_rate_and_spikes(spike_times[in_epoch], spike_clusters[in_epoch], total_units)
print("Calculating amplitude cutoff")
amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters[in_epoch], amplitudes[in_epoch], total_units)
print("Calculating PC-based metrics")
isolation_distance, l_ratio, d_prime, nn_hit_rate, nn_miss_rate = calculate_pc_metrics(spike_clusters[in_epoch],
total_units,
pc_features[in_epoch,:,:],
pc_feature_ind,
params['num_channels_to_compare'],
params['max_spikes_for_unit'],
spikes_for_nn,
params['n_neighbors'],
seed=seed)
print("Calculating silhouette score")
silhouette_score = calculate_silhouette_score(spike_clusters[in_epoch],
total_units,
pc_features[in_epoch,:,:],
pc_feature_ind,
spikes_for_silhouette,
seed=seed)
print("Calculating drift metrics")
max_drift, cumulative_drift = calculate_drift_metrics(spike_times[in_epoch],
spike_clusters[in_epoch],
total_units,
pc_features[in_epoch,:,:],
pc_feature_ind,
params['drift_metrics_interval_s'],
params['drift_metrics_min_spikes_per_interval'])
if(cluster_ids is None):
cluster_ids_out = np.arange(total_units)
else:
cluster_ids_out = cluster_ids
        epoch_name = [epoch.name] * len(cluster_ids_out)
metrics = pd.concat((metrics, pd.DataFrame(data= OrderedDict((('cluster_id', cluster_ids_out),
('num_spikes' , num_spikes),
('firing_rate' , firing_rate),
('presence_ratio' , presence_ratio),
('isi_viol' , isi_viol),
('amplitude_cutoff' , amplitude_cutoff),
('isolation_distance' , isolation_distance),
('l_ratio' , l_ratio),
('d_prime' , d_prime),
('nn_hit_rate' , nn_hit_rate),
('nn_miss_rate' , nn_miss_rate),
('silhouette_score', silhouette_score),
('max_drift', max_drift),
('cumulative_drift', cumulative_drift),
('epoch_name' , epoch_name),
)))))
return metrics
# ===============================================================
# HELPER FUNCTIONS TO LOOP THROUGH CLUSTERS:
# ===============================================================
def calculate_isi_violations(spike_times, spike_clusters, total_units, isi_threshold, min_isi):
cluster_ids = np.unique(spike_clusters)
viol_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
viol_rates[cluster_id], num_violations = isi_violations(spike_times[for_this_cluster],
min_time = np.min(spike_times),
max_time = np.max(spike_times),
isi_threshold=isi_threshold,
min_isi = min_isi)
return viol_rates
def calculate_presence_ratio(spike_times, spike_clusters, total_units):
cluster_ids = np.unique(spike_clusters)
ratios = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
ratios[cluster_id] = presence_ratio(spike_times[for_this_cluster],
                                           min_time = np.min(spike_times),
                                           max_time = np.max(spike_times))
    return ratios
import numpy as np
import util.np
import util.rand
import util.mod
import util.dec
@util.dec.print_test
def test_flatten():
a = util.rand.rand(3, 5, 2)
b = util.np.flatten(a, 2)
b = np.reshape(b, a.shape)
assert util.np.sum_all(b == a) == np.prod(a.shape)
@util.dec.print_test
def test_arcsin():
pi = np.pi
# max/min, 1st phase, 2nd and 3rd phase, 4th phase
sins = [
[1, -1],
[1/np.sqrt(2), np.sqrt(3)/2],
[0.5, -0.5],
[-1/np.sqrt(2), -np.sqrt(3)/2]
]
xs = [
[0, 0],
[1, 1],
[-1, -1],
[1, 1]
]
arcs = [
[pi / 2, pi * 3 / 2],
[pi / 4, pi / 3],
[pi* 5 / 6, pi * 7 / 6],
[pi * 7 / 4, pi * 5 / 3]
]
np.testing.assert_almost_equal(util.np.arcsin(sins = sins, xs = xs), arcs)
@util.dec.print_test
def test_sin():
# when angles are provided
angles = util.rand.rand(3, 5)
    assert util.np.sum_all(util.np.sin(angles = angles) == np.sin(angles)) == np.prod(angles.shape)
import os.path
import numpy as np
import cv2
import torch
import torch.utils.data as data
import data.util as util
from utils import util as img_util
import math
class CoFourLevelsDataset(data.Dataset):
'''
Supports up to 4 backbones.
Generates D1, D2, D3 on-the-fly using PIL.
HR_Color are the ground-truth color images in their original resolution; for example, 256 * 256, scale = 16
D1 = color images, 32 * 32 = ((2/scale) * original)
D2 = color images, 64 * 64 = ((4/scale) * original)
D3 = color images, 128 * 128 = ((8/scale) * original)
D4 = color images, 256 * 256 = HR_Color image
In the colorization task, the input and output resolutions are equal; therefore, we wouldn't have LR_Color
'''
def __init__(self, opt):
super(CoFourLevelsDataset, self).__init__()
self.opt = opt
self.paths_HR_Color = None
self.HR_Color_env = None
self.paths_D1 = None
self.D1_env = None
# read image list from subset list txt
if opt['subset_file'] is not None and opt['phase'] == 'train':
with open(opt['subset_file']) as f:
self.paths_HR_Color = sorted([os.path.join(opt['dataroot_HR_Color'], line.rstrip('\n')) for line in f])
if opt['dataroot_LR_Color'] is not None:
raise NotImplementedError('Now subset only supports generating LR on-the-fly.')
else: # read image list from lmdb or image files
self.HR_Color_env, self.paths_HR_Color = util.get_image_paths(opt['data_type'], opt['dataroot_HR_Color'])
self.D1_env, self.paths_D1 = util.get_image_paths(opt['data_type'], opt['dataroot_D1'])
self.D2_env, self.paths_D2 = util.get_image_paths(opt['data_type'], opt['dataroot_D2'])
self.D3_env, self.paths_D3 = util.get_image_paths(opt['data_type'], opt['dataroot_D3'])
assert self.paths_HR_Color, 'Error: HR_Color path is empty.'
self.random_scale_list = [1]
self.rarity_masks = []
if opt['phase'] == 'train':
if 'rarity_mask_1' in self.opt:
for i in range(0, int(math.log(opt['scale'], 2))):
self.rarity_masks.append(np.load(opt['rarity_mask_{}'.format(i + 1)], mmap_mode='r'))
def __getitem__(self, index):
scale = self.opt['scale']
validity = True
# generating outputs for the specified IDs in the test set rather than generating outputs for the whole test set
if self.opt['phase'] == 'val':
if 'target_images_id' in self.opt:
if self.paths_HR_Color[index] not in self.opt['target_images_id']:
return {'is_valid': False}
# get HR_Color image
HR_path = self.paths_HR_Color[index]
img_HR_bgr, img_HR_bgr_no_scaled = util.read_img(self.HR_Color_env, HR_path) # HWC, BGR, [0,1], [0, 255]
# force to 3 channels
if img_HR_bgr.ndim == 2:
img_HR_bgr = cv2.cvtColor(img_HR_bgr, cv2.COLOR_GRAY2BGR)
img_HR_bgr_no_scaled = cv2.cvtColor(img_HR_bgr_no_scaled, cv2.COLOR_GRAY2BGR)
img_HR_rgb = cv2.cvtColor(img_HR_bgr, cv2.COLOR_BGR2RGB) # HWC, RGB, [0, 1], 256 * 256
img_HR_rgb_no_scaled = cv2.cvtColor(img_HR_bgr_no_scaled, cv2.COLOR_BGR2RGB) # HWC, RGB, [0, 255], 256 * 256
# D1, D2, D3, D4
if self.paths_D1:
D1_path = self.paths_D1[index]
D2_path = self.paths_D2[index]
D3_path = self.paths_D3[index]
img_D1_bgr, _ = util.read_img(self.D1_env, D1_path)
img_D2_bgr, _ = util.read_img(self.D2_env, D2_path)
img_D3_bgr, _ = util.read_img(self.D3_env, D3_path)
img_D1_rgb = cv2.cvtColor(img_D1_bgr, cv2.COLOR_BGR2RGB)
img_D2_rgb = cv2.cvtColor(img_D2_bgr, cv2.COLOR_BGR2RGB)
img_D3_rgb = cv2.cvtColor(img_D3_bgr, cv2.COLOR_BGR2RGB)
else: # down-sampling on-the-fly
# HWC, RGB, [0, 1]
img_D1_rgb = img_util.downsample_PIL(rgb_no_scaled=img_HR_rgb_no_scaled, scale=2.0 / scale) # 32 * 32
img_D2_rgb = img_util.downsample_PIL(rgb_no_scaled=img_HR_rgb_no_scaled, scale=4.0 / scale) # 64 * 64
img_D3_rgb = img_util.downsample_PIL(rgb_no_scaled=img_HR_rgb_no_scaled, scale=8.0 / scale) # 128 * 128
D1_path = D2_path = D3_path = HR_path
# augmentation - flip, rotate
if self.opt['phase'] == 'train':
img_HR_rgb, img_D1_rgb, img_D2_rgb, img_D3_rgb = util.augment(
[img_HR_rgb, img_D1_rgb, img_D2_rgb, img_D3_rgb],
self.opt['use_flip'], self.opt['use_rot'])
# L channel
img_HR_lab = img_util.rgb2lab(img_HR_rgb)
img_D1_lab = img_util.rgb2lab(img_D1_rgb)
img_D2_lab = img_util.rgb2lab(img_D2_rgb)
img_D3_lab = img_util.rgb2lab(img_D3_rgb)
HR_L_channel = img_HR_lab[:, :, 0] / 100.0
D1_L_channel = img_D1_lab[:, :, 0] / 100.0
D2_L_channel = img_D2_lab[:, :, 0] / 100.0
D3_L_channel = img_D3_lab[:, :, 0] / 100.0
HR_L_channel_tensor = torch.Tensor(HR_L_channel)[None, :, :]
D1_L_channel_tensor = torch.Tensor(D1_L_channel)[None, :, :]
D2_L_channel_tensor = torch.Tensor(D2_L_channel)[None, :, :]
D3_L_channel_tensor = torch.Tensor(D3_L_channel)[None, :, :]
# HWC to CHW, numpy to tensor
img_HR_tensor_rgb = torch.from_numpy(np.ascontiguousarray(np.transpose(img_HR_rgb, (2, 0, 1)))).float()
        img_D1_tensor_rgb = torch.from_numpy(np.ascontiguousarray(np.transpose(img_D1_rgb, (2, 0, 1)))).float()
import numpy as np
from scipy import optimize
def fit(X,
y,
population_count=100,
elite_count=2,
velocity_rate=0.001,
epoch_count=25):
params_count = 4
lower_limits = np.array([0, 0, -np.pi, -1])
upper_limits = np.array([1, np.pi * 2, np.pi, 1])
bounds = np.array([(l, u) for l, u in zip(lower_limits, upper_limits)])
def function(afsb, t):
return afsb[..., 0:1] * np.sin(afsb[..., 1:2] * t - afsb[..., 2:3]) + afsb[..., 3:4]
def error(params, X, y):
y_ = function(params, X)
return np.sqrt(np.sum((y - y_) ** 2, axis=-1) / X.shape[-1])
    def extinctions(fitness):
        return (fitness + np.min(fitness) * (
            ((params_count - 1.0) / (population_count - 1.0)) - 1.0)) / np.max(
            fitness)
# initial population
swarm_positions = np.random.uniform(lower_limits, upper_limits, (population_count, params_count))
swarm_velocities = np.random.uniform(-0.1, 0.1, population_count * params_count).reshape(
(population_count, params_count))
swarm_fitness = error(swarm_positions, X[np.newaxis, :], y)
swarm_extinction = extinctions(swarm_fitness)
swarm_sorted_args = np.argsort(swarm_fitness, axis=0)
# global best
solution = swarm_positions[swarm_sorted_args[0], ...]
best_fitness = swarm_fitness[swarm_sorted_args[0]]
# iterate
for epoch in range(epoch_count):
# early exit if close enough
if best_fitness < 1e-6:
break
# pick elites and do a gradient descent using l-bfgs-b algorithm
for e in range(elite_count):
x, _, _ = optimize.fmin_l_bfgs_b(
func=error,
x0=swarm_positions[swarm_sorted_args[e], ...],
args=(X[np.newaxis, :], y),
approx_grad=True,
bounds=bounds,
maxiter=100)
swarm_velocities[swarm_sorted_args[e], ...] = np.random.uniform() * \
swarm_velocities[swarm_sorted_args[e], ...] + x - \
swarm_positions[swarm_sorted_args[e], ...]
swarm_positions[swarm_sorted_args[e], ...] = x
# create the offsprings
offspring_positions = np.zeros((population_count, params_count), dtype=np.float32)
offspring_velocities = np.zeros((population_count, params_count), dtype=np.float32)
offspring_fitness = np.zeros(population_count, dtype=np.float32)
# populate offsprings
for off in range(population_count):
parents_count = len(swarm_sorted_args)
# rank based selection
probabilities = np.array([parents_count - i for i in range(parents_count)], dtype=np.float32)
probabilities /= np.sum(probabilities)
a, b, prot = np.random.choice(swarm_sorted_args, 3, p=probabilities, replace=False)
# combine parents
mix_values = np.random.uniform(size=params_count)
offspring_positions[off, :] = swarm_positions[a, :] * mix_values + \
swarm_positions[b, :] * (1.0 - mix_values)
# add a bit of the velocity from the parents
offspring_positions[off, :] += velocity_rate * (swarm_velocities[a, :] + swarm_velocities[b, :])
# use the velocities from the parents
offspring_velocities[off, :] = np.random.uniform(size=params_count) * swarm_velocities[a, :] + \
np.random.uniform(size=params_count) * swarm_velocities[b, :]
# mutate
p = (np.mean(swarm_extinction[[a, b]]) * (params_count - 1.0) + 1.0) / params_count
if p < np.random.uniform():
swarm_min = np.min(swarm_positions, axis=0)
swarm_max = np.max(swarm_positions, axis=0)
x = np.random.uniform(-1, 1, size=params_count) * np.mean(swarm_extinction[[a, b]]) * (
swarm_max - swarm_min)
offspring_velocities[off, :] += x
offspring_positions[off, :] += x
# adoption
mix_values = np.random.uniform(size=params_count)
average_parents = np.mean(swarm_positions[[a, b], :], axis=0)
x = mix_values * (average_parents - offspring_positions[off, :])
mix_values = np.random.uniform(size=params_count)
x += mix_values * (offspring_positions[prot, :] - offspring_positions[off, :])
offspring_velocities[off, :] += x
offspring_positions[off, :] += x
# clip
offspring_positions[off, :] = np.clip(offspring_positions[off, :], a_min=lower_limits, a_max=upper_limits)
# compute fitness of this offspring
offspring_fitness[off] = error(offspring_positions[off, :], X, y)
# assign offsprings to population
swarm_positions = offspring_positions
swarm_velocities = offspring_velocities
swarm_fitness = offspring_fitness
# sort everyone
swarm_sorted_args = np.argsort(swarm_fitness, axis=0)
swarm_extinction = extinctions(swarm_fitness)
# try update solution
if swarm_fitness[swarm_sorted_args[0]] < best_fitness:
best_fitness = swarm_fitness[swarm_sorted_args[0]]
solution = swarm_positions[swarm_sorted_args[0], ...]
return solution, best_fitness
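# Usage sketch (added illustration, synthetic data): recover the parameters of a
# noisy sinusoid with fit(). The true values (0.8, 1.5, 0.3, 0.2) are arbitrary
# demo numbers inside the search bounds; outputs vary run to run because the
# optimizer is stochastic.
def _demo_fit(seed=0):
    np.random.seed(seed)
    t = np.linspace(0.0, 2.0 * np.pi, 64)
    y = 0.8 * np.sin(1.5 * t - 0.3) + 0.2 + np.random.normal(0.0, 0.01, t.shape)
    params, rmse = fit(t, y)
    # params = [amplitude, frequency, shift, bias], matching function() above
    print('a=%.3f f=%.3f s=%.3f b=%.3f rmse=%.4f' % tuple(list(params) + [rmse]))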
def fast_fit(X,
y,
population_count=200,
epoch_count=400,
original_fit=None):
weights = np.ones_like(X)
#weights[:len(X)-2] = np.linspace(0.2, 1.0, len(X) - 2)
#weights[len(X) - 2:] = np.linspace(1.0, 0.2, len(X) - 2)
def function(afsb, t):
return afsb[..., 0:1] * np.sin(afsb[..., 1:2] * t - afsb[..., 2:3]) + afsb[..., 3:4]
def error(params, X, y):
y_ = function(params, X)
return np.sqrt(np.sum(((y - y_) ** 2) * weights, axis=-1) / X.shape[-1])
params_count = 4
lower_limits = np.array([0, 0, -np.pi, -.5])
upper_limits = np.array([1, np.pi * 2, np.pi, .5])
bounds = np.array([(l, u) for l, u in zip(lower_limits, upper_limits)])
    lower_limits = lower_limits[np.newaxis, :] * np.ones((population_count, 1))
    upper_limits = upper_limits[np.newaxis, :] * np.ones((population_count, 1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 8 13:03:52 2020
@author: chonlatid
"""
from PIL import Image,ImageDraw
import numpy as np
import cv2
import math
class gendata():
def __init__(self):
# self.sx = 1.2
# self.sy = 1.2
self.pad_param = 20
self.rotate_degree_param = 5
self.img_rows = 128
self.img_cols = 128
self.channels = 3
self.input_shape = (self.img_rows, self.img_cols, self.channels)
def scale_and_rotate_image(self,im, sx, sy, deg_ccw,fill):
sx = 1
sy = 1
im_orig = im
im = Image.new('RGB', im_orig.size, (0, 0, 0))
im.paste(im_orig)
w, h = im.size
angle = math.radians(-deg_ccw)
cos_theta = math.cos(angle)
sin_theta = math.sin(angle)
scaled_w, scaled_h = w * sx, h * sy
new_w = int(math.ceil(math.fabs(cos_theta * scaled_w) + math.fabs(sin_theta * scaled_h)))
new_h = int(math.ceil(math.fabs(sin_theta * scaled_w) + math.fabs(cos_theta * scaled_h)))
cx = w / 2.
cy = h / 2.
tx = new_w / 2.
ty = new_h / 2.
a = cos_theta / sx
b = sin_theta / sx
c = cx - tx * a - ty * b
d = -sin_theta / sy
e = cos_theta / sy
f = cy - tx * d - ty * e
return im.transform(
(new_w, new_h),
Image.AFFINE,
(a, b, c, d, e, f),
resample=Image.BILINEAR,
fillcolor = fill
)
def gen_data(self,input_path,isPerspective = False):
img = Image.open(input_path).convert('RGB')
img = np.asarray(img)
pad_top = int(abs(np.random.uniform(0,self.pad_param)))
pad_bottom = int(abs(np.random.uniform(0,self.pad_param)))
pad_left = int(abs(np.random.uniform(0,self.pad_param)))
pad_right = int(abs(np.random.uniform(0,self.pad_param)))
# sx = int(abs(np.random.uniform(0.8,self.sx)))
# sy = int(abs(np.random.uniform(0.8,self.sy)))
rotate_param = np.random.uniform(0,self.rotate_degree_param)
src_points = np.float32([[0,0], [img.shape[1],0], [0,img.shape[0]], [img.shape[1],img.shape[0]]])
# dx0 = 0 + np.random.uniform(1,0.1*img.shape[1])
# dy0 = 0 + np.random.uniform(1,0.1*img.shape[0])
# dx1 = img.shape[0] - np.random.uniform(100,0.1*img.shape[1])
# dy1 = 0 + np.random.uniform(1,0.1*img.shape[1])
# dx2 = 0 + np.random.uniform(1,0.1*img.shape[0])
# dy2 = img.shape[1] - np.random.uniform(100,0.1*img.shape[1])
# dx3 = img.shape[0] - np.random.uniform(1,0.1*img.shape[0])
# dy3 = img.shape[1] - np.random.uniform(1,0.1*img.shape[1])
dx0 = 0 + np.random.uniform(0,0.2*img.shape[0])
        dy0 = 0 + np.random.uniform(0,0.2*img.shape[1])
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#import pickle
import glob
def abs_sobel_thresh(gray, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Apply threshold
# gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Apply x or y gradient with the OpenCV Sobel() function
# and take the absolute value
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1))
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
binary_output = np.zeros_like(scaled_sobel)
binary_output[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return binary_output
def mag_thresh(gray, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Apply threshold
# gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
gradmag = np.sqrt(sobelx**2 + sobely**2)
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
return binary_output
def dir_threshold(gray, sobel_kernel=3, thresh=(0, np.pi/2)):
# Calculate gradient direction
# Apply threshold
# gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
gradmag = np.arctan2(np.absolute(sobely),np.absolute(sobelx))
binary_output = np.zeros_like(gradmag)
binary_output[(gradmag >= thresh[0]) & (gradmag <= thresh[1])] = 1
return binary_output
def convert_binary(image):
# image = mpimg.imread('test_images/straight_lines1.jpg')
#image = mpimg.imread('test_images/test6.jpg')
# Choose a Sobel kernel size
ksize = 5 # Choose a larger odd number to smooth gradient measurements
# Apply each of the thresholding functions
#gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
#gradx = abs_sobel_thresh(gray, orient='x', sobel_kernel=ksize, thresh=(30, 255))
#grady = abs_sobel_thresh(gray, orient='y', sobel_kernel=ksize, thresh=(30, 255))
#mag_binary = mag_thresh(gray, sobel_kernel=ksize, mag_thresh=(50, 255))
#dir_binary = dir_threshold(gray, sobel_kernel=ksize, thresh=(0.95, 1))
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
# H = hls[:,:,0]
# L = hls[:,:,1]
S = hls[:,:,2]
#plt.figure(2)
#plt.imshow(S)
gradx1 = abs_sobel_thresh(S, orient='x', sobel_kernel=ksize, thresh=(20, 255))
grady1 = abs_sobel_thresh(S, orient='y', sobel_kernel=ksize, thresh=(20, 255))
mag_binary1 = mag_thresh(S, sobel_kernel=ksize, mag_thresh=(20, 255))
dir_binary1 = dir_threshold(S, sobel_kernel=ksize, thresh=(1.1, 1.3))
combined = np.zeros_like(dir_binary1)
combined[((gradx1 == 1) & (grady1 == 1)) | ((mag_binary1 == 1) & (dir_binary1 == 1))] = 1
# plt.figure(2)
# plt.imshow(combined)
return combined
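# Quick self-check (synthetic image, illustrative only): convert_binary expects
# an RGB array and returns a binary mask with the same height and width.
def _demo_convert_binary():
    rgb = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
    mask = convert_binary(rgb)
    assert mask.shape == rgb.shape[:2]
    return mask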
images = glob.glob('test_images/*.jpg')
# Step through the list and search for chessboard corners
nCount = 0
for idx, fname in enumerate(images):
img = mpimg.imread(fname)
retImg = convert_binary(img)
imgBin = np.zeros_like(img)
    imgGray = np.zeros_like(img)
# -*- coding:Utf-8 -*-
#from numpy import *
from __future__ import print_function
"""
.. currentmodule:: pylayers.antprop.loss
.. autosummary::
:members:
"""
import doctest
import os
import glob
import logging
import numpy as np
from scipy import io
import matplotlib.pylab as plt
import pylayers.gis.gisutil as gu
import numpy.linalg as la
import pdb
import time
from numba import jit
def PL0(fGHz,GtdB=0,GrdB=0,R=1):
""" Path Loss at frequency fGHZ @ R
Parameters
----------
fGHz: float
frequency GHz
GtdB: float
transmitting antenna gain dB (default 0 dB)
GrdB: float
receiving antenna gain dB (default 0 dB)
R : float
distance in m
Returns
-------
PL0 : float
path @ R
Notes
-----
.. math:: PL_0 = -20 log_{10}(\\frac{\\lambda}{4\\pi}) - GtdB -GrdB
Examples
--------
>>> fGHz = 2.4
>>> PL = PL0(fGHz)
>>> assert (PL<41)&(PL>40),"something wrong"
"""
if not isinstance(fGHz,np.ndarray):
fGHz=np.array([fGHz])
ld = 0.3/fGHz
PL0 = -20*np.log10(ld/(4.0*np.pi*R))-GtdB-GrdB
return PL0
def Dgrid_points(points,Px):
""" distance point to grid
Parameters
----------
points : np.array
grid Np x 2 array
Px : np.array
point 2 x 1 array
Returns
-------
D: Euclidian distance matrix
"""
Dx = points[:,0] - Px[0]
Dy = points[:,1] - Px[1]
D = np.sqrt( Dx*Dx + Dy*Dy )
return(D)
def FMetisShad2(fGHz,r,D,sign=1):
""" F Metis shadowing function
Parameters
----------
fGHz : np.array(Nf)
frequency GHz
r : np.array(Nseg,)
distance between Tx and Rx
D : np.array(Nseg,Nscreen)
indirect distance between Tx and Rx (screen effect)
sign : np.array(Nseg,Nscreen)
== 1 : Shadowing NLOS situation
==-1 : No shadowing LOS situation
Returns
-------
F : np.array(Nseg,Nscreen,Nf)
Notes
-----
Provides an implementation of formula (6.6) in D1.4 of METIS project
See Also
--------
LossMetisShadowing
"""
lamda = 0.3/fGHz[None,None,:]
F = np.arctan(sign[:,:,None]*np.pi/2.*(np.sqrt((np.pi/lamda)*(D[:,:,None]-r[:,None,None])))) / np.pi
return(F)
def FMetisShad(fGHz,r,D,sign=1):
""" F Metis shadowing function
Parameters
----------
fGHz : float
frequency GHz
r : float
distance between Tx and Rx
D : float
indirect distance between Tx and Rx (screen effect)
sign : int
== 1 : Shadowing NLOS situation
==-1 : No shadowing LOS situation
Notes
-----
Provides an implementation of formula (6.6) in D1.4 of METIS project
See Also
--------
LossMetisShadowing
"""
lamda = 0.3/fGHz
F = np.arctan(sign*np.pi/2.*(np.sqrt((np.pi/lamda)*(D-r)))) / np.pi
return(F)
def LossMetisShadowing(fGHz,tx,rx,pg,uw,uh,w,h):
""" Calculate the Loss from
Parameters
----------
fGHz : float
tx : np.array (,3) of floats
transmiter coordinates
rx : np.array (,3) of floats
receiver coordinates
pg : np.array (,3) of floats
center of gravity of the screen
uw : np.array (,3) of floats
unitary vector along width dimension
uh : np.array (,3) of floats
unitary vector along height dimension
w : float
width in meters
h : float
height in meters
Returns
-------
Lsh : float
Loss in dB to add to the FS path Loss
Notes
-----
This function provides an implementation of formula 6.5 of D1.4 deliverable of METIS project
[Metis D1.4](Ahttps://www.metis2020.com/wp-content/uploads/METIS_D1.4_v3.pdf)
# geometry parametric issue : find M in [tx-rx] defined as M = alpha*rx + (1-alpha)tx where alpha in [0-1].
# if alpha = 0 then M = tx ; if alpha = 1 then M = rx.
# Besides, M is defined as M = pg + beta*uw + gamma*uh then alpha*rx + (1-alpha)tx = pg + beta*uw + gamma*uh
# [rx-tx , -uw, -uh]*[alpha,beta,gamma].T = pg - tx <==> Ax = b solved by la.solve ; x[0]=alpha, x[1]=beta and
TODO To be vectorized
"""
rxtx = rx - tx # LOS distance
# x[2]=gamma.
A = np.vstack((rxtx,-uw,-uh)).T
b = pg - tx
x = la.solve(A,b)
# condition of shadowing
condseg = ((x[0]>1) or (x[0]<0))
condw = ((x[1]>w/2.) or (x[1]<-w/2.))
condh = ((x[2]>h/2.) or (x[2]<-h/2.))
visi = condseg or condw or condh
if visi:
shad = -1
else:
shad = 1
r = np.dot(rxtx,rxtx)**0.5
w1 = pg + uw*w/2.
w2 = pg - uw*w/2.
h1 = pg + uh*h/2.
h2 = pg - uh*h/2.
Dtw1 = np.dot(tx-w1,tx-w1)**0.5
Drw1 = np.dot(rx-w1,rx-w1)**0.5
Dtw2 = np.dot(tx-w2,tx-w2)**0.5
Drw2 = np.dot(rx-w2,rx-w2)**0.5
Dth1 = np.dot(tx-h1,tx-h1)**0.5
Drh1 = np.dot(rx-h1,rx-h1)**0.5
Dth2 = np.dot(tx-h2,tx-h2)**0.5
Drh2 = np.dot(rx-h2,rx-h2)**0.5
D1w = Dtw1+Drw1
D1h = Dth1+Drh1
D2w = Dtw2+Drw2
D2h = Dth2+Drh2
if shad == 1:
signw1 = 1
signw2 = 1
signh1 = 1
signh2 = 1
else:
if condw:
if D1w>D2w:
signw1=1
signw2=-1
else:
signw1=-1
signw2=1
else:
signw1 = 1
signw2 = 1
if condh:
if D1h>D2h:
signh1=1
signh2=-1
else:
signh1=-1
signh2=1
else:
signh1 = 1
signh2 = 1
Fw1 = FMetisShad(fGHz,r,D1w,sign=signw1)
Fh1 = FMetisShad(fGHz,r,D1h,sign=signh1)
Fw2 = FMetisShad(fGHz,r,D2w,sign=signw2)
Fh2 = FMetisShad(fGHz,r,D2h,sign=signh2)
tmp = (Fh1+Fh2)*(Fw1+Fw2)
Lsh = -20*np.log10(1-tmp)
#return(Lsh,shad,tmp,Fw1,Fh1,Fw2,Fh2,condh,condw)
return(Lsh)
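# Illustrative check (not part of the original module; geometry values are made
# up): a 1 m x 1 m screen centred on a 10 m LOS link at 60 GHz should yield a
# significant shadowing loss, while the same screen moved 5 m off-axis should
# give a loss close to 0 dB.
def _demo_metis_shadowing():
    fGHz = 60.0
    tx = np.array([0.0, 0.0, 1.5])
    rx = np.array([10.0, 0.0, 1.5])
    uw = np.array([0.0, 1.0, 0.0])            # screen width axis
    uh = np.array([0.0, 0.0, 1.0])            # screen height axis
    pg_block = np.array([5.0, 0.0, 1.5])      # screen centre on the path
    pg_clear = np.array([5.0, 5.0, 1.5])      # screen centre off the path
    print('blocked :', LossMetisShadowing(fGHz, tx, rx, pg_block, uw, uh, 1.0, 1.0))
    print('off-axis:', LossMetisShadowing(fGHz, tx, rx, pg_clear, uw, uh, 1.0, 1.0))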
def LossMetisShadowing2(fGHz,tx,rx,pg,uw,uh,w,h):
""" Calculate the Loss from
Parameters
----------
fGHz : np.array(,Nf)
tx : np.array (3,Nseg) of floats
transmiter coordinates
rx : np.array (3,Nseg) of floats
receiver coordinates
pg : np.array (3,Nscreen) of floats
center of gravity of the screen
uw : np.array (3,Nscreen) of floats
unitary vector along width dimension
uh : np.array (3,Nscreen) of floats
unitary vector along height dimension
w : np.array (,Nscreen)
width in meters
h : np.array (,Nscreen)
height in meters
Returns
-------
Lsh : np.array (Nseg,Nscreen,Nf)
Loss in dB to add to the FS path Loss
Notes
-----
This function provides an implementation of formula 6.5 of D1.4 deliverable of METIS project
[Metis D1.4](Ahttps://www.metis2020.com/wp-content/uploads/METIS_D1.4_v3.pdf)
# geometry parametric issue : find M in [tx-rx] defined as M = alpha*rx + (1-alpha)tx where alpha in [0-1].
# if alpha = 0 then M = tx ; if alpha = 1 then M = rx.
# Besides, M is defined as M = pg + beta*uw + gamma*uh then alpha*rx + (1-alpha)tx = pg + beta*uw + gamma*uh
# [rx-tx , -uw, -uh]*[alpha,beta,gamma].T = pg - tx <==> Ax = b solved by la.solve ; x[0]=alpha, x[1]=beta and
"""
Nseg = tx.shape[1]
Nscreen = uw.shape[1]
rxtx = rx - tx # (3,Nseg) LOS distance
# A : (Nseg,Nscreen,3,3)
# b : (Nseg,Nscreen,3)
# rxtx.T (Nseg,3)
# uw.T (Nscreen, 3)
# uh.T (Nscreen, 3)
U = rxtx.T[:,None,:,None]
W = uw.T[None,:,:,None]
H = uh.T[None,:,:,None]
We = W + np.zeros(U.shape)
He = H + np.zeros(U.shape)
Ue = U + np.zeros(He.shape)
A = np.concatenate((Ue,-We,-He),axis=3)
#A = np.vstack((rxtx,-uw,-uh)).T
# pg.T Nscreen, 3
# tx.T Nseg,3
b = pg.T[None,:,:]-tx.T[:,None,:]
#b = pg - tx
x = la.solve(A,b)
# condition of shadowing
condseg = ((x[:,:,0]>1) + (x[:,:,0]<0))
condw = ((x[:,:,1]>w[None,:]/2.) + (x[:,:,1]<-w[None,:]/2.))
condh = ((x[:,:,2]>h[None,:]/2.) + (x[:,:,2]<-h[None,:]/2.))
    visi = (condseg + condw + condh) > 0
# if visi:
# shad = -1
# else:
# shad = 1
#shad = - visi
r = np.sum(rxtx*rxtx,axis=0)**0.5
w1 = pg + uw*w[None,:]/2.
w2 = pg - uw*w[None,:]/2.
h1 = pg + uh*h[None,:]/2.
h2 = pg - uh*h[None,:]/2.
Dtw1 = np.sum((tx[...,None]-w1[:,None,:])*(tx[...,None]-w1[:,None,:]),axis=0)**0.5
Drw1 = np.sum((rx[...,None]-w1[:,None,:])*(rx[...,None]-w1[:,None,:]),axis=0)**0.5
Dtw2 = np.sum((tx[...,None]-w2[:,None,:])*(tx[...,None]-w2[:,None,:]),axis=0)**0.5
Drw2 = np.sum((rx[...,None]-w2[:,None,:])*(rx[...,None]-w2[:,None,:]),axis=0)**0.5
Dth1 = np.sum((tx[...,None]-h1[:,None,:])*(tx[...,None]-h1[:,None,:]),axis=0)**0.5
Drh1 = np.sum((rx[...,None]-h1[:,None,:])*(rx[...,None]-h1[:,None,:]),axis=0)**0.5
Dth2 = np.sum((tx[...,None]-h2[:,None,:])*(tx[...,None]-h2[:,None,:]),axis=0)**0.5
Drh2 = np.sum((rx[...,None]-h2[:,None,:])*(rx[...,None]-h2[:,None,:]),axis=0)**0.5
# Drw1 = np.dot(rx-w1,rx-w1)**0.5
# Dtw2 = np.dot(tx-w2,tx-w2)**0.5
# Drw2 = np.dot(rx-w2,rx-w2)**0.5
# Dth1 = np.dot(tx-h1,tx-h1)**0.5
# Drh1 = np.dot(rx-h1,rx-h1)**0.5
# Dth2 = np.dot(tx-h2,tx-h2)**0.5
# Drh2 = np.dot(rx-h2,rx-h2)**0.5
D1w = Dtw1+Drw1
D1h = Dth1+Drh1
D2w = Dtw2+Drw2
D2h = Dth2+Drh2
signw1 = np.ones((Nseg,Nscreen))
signw2 = np.ones((Nseg,Nscreen))
signh1 = np.ones((Nseg,Nscreen))
signh2 = np.ones((Nseg,Nscreen))
condw1 = (visi*condw*(D1w<=D2w)).astype(bool)
condw2 = (visi*condw*(D1w>D2w)).astype(bool)
signw1[condw1]=-1
signw2[condw2]=-1
condh1 = (visi*condh*(D1h<=D2h)).astype(bool)
condh2 = (visi*condh*(D1h>D2h)).astype(bool)
signh1[condh1]=-1
signh2[condh2]=-1
Fw1 = FMetisShad2(fGHz,r,D1w,sign=signw1)
Fh1 = FMetisShad2(fGHz,r,D1h,sign=signh1)
Fw2 = FMetisShad2(fGHz,r,D2w,sign=signw2)
Fh2 = FMetisShad2(fGHz,r,D2h,sign=signh2)
tmp = (Fh1+Fh2)*(Fw1+Fw2)
Lsh = -20*np.log10(1-tmp)
#return(Lsh,shad,tmp,Fw1,Fh1,Fw2,Fh2,condh,condw)
return(Lsh)
def Dgrid_zone(zone,Px):
""" Distance point to zone
    A zone is a quadrilateral region.
Parameters
----------
    zone : dictionary
xmin xmax Nx
ymin ymax Ny
Px : np.array
point
Returns
-------
D : np.array Nx x Ny
        Euclidean distance matrix
Notes
-----
Build the distance matrix between Tx and points in the zone
use broadcasting instead
"""
rx = np.linspace(zone['xmin'],zone['xmax'],zone['Nx'])
ry = np.linspace(zone['ymin'],zone['ymax'],zone['Ny'])
R_x = np.outer(np.ones(len(ry)),rx)
R_y = np.outer(ry,np.ones(len(rx)))
Dx = R_x - Px[0]
Dy = R_y - Px[1]
D = np.sqrt(Dx*Dx+Dy*Dy)
return (D)
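# Broadcasting variant of Dgrid_zone, added as the sketch suggested by the note
# above: the two outer products are replaced by (1,Nx) and (Ny,1) views that
# broadcast to the same (Ny,Nx) grid.
def Dgrid_zone_bc(zone, Px):
    rx = np.linspace(zone['xmin'], zone['xmax'], zone['Nx'])
    ry = np.linspace(zone['ymin'], zone['ymax'], zone['Ny'])
    Dx = rx[None, :] - Px[0]            # (1,Nx)
    Dy = ry[:, None] - Px[1]            # (Ny,1)
    return np.sqrt(Dx * Dx + Dy * Dy)   # broadcasts to (Ny,Nx)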
def OneSlopeMdl(D,n,fGHz):
""" one slope model
Parameters
----------
D : np.array
distance array
n : float
path loss exponent
fGHz : np.array
frequency GHz
Returns
-------
PL : np.array
path loss as a function of distance
"""
PL = PL0(fGHz) + 10*n*np.log10(D)
return(PL)
def cost231(pBS,pMS,hroof,phir,wr,fMHz,wb=20,dB=True,city='medium'):
""" Walfish Ikegami model (COST 231)
Parameters
----------
pBS : np.array (3xNlink)
pMS : np.array (3xNlink)
hroof : np.array (1xNlink)
phir : np.array (1xNlink)
degrees
wr : np.array (1xNlink)
fMHz : np.array (1xNf)
wb : float
average building separation
dB : boolean
Returns
-------
PathLoss (Nlink,Nf)
References
----------
http://morse.colorado.edu/~tlen5510/text/classwebch3.html
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.loss import *
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> # Number of links and BS and MS heights
>>> Nlink = 100
>>> hBS = 300
>>> hMS = 1.5
    >>> # hroof and phir are drawn uniformly at random
>>> hroof = 40*np.random.rand(Nlink)
>>> wr = 10*np.ones(Nlink)
>>> phir = 90*np.random.rand(Nlink)
>>> pMS = np.vstack((np.linspace(10,2500,Nlink),np.zeros(Nlink),hMS*np.ones(Nlink)))
>>> pBS = np.vstack((np.zeros(Nlink),np.zeros(Nlink),hBS*np.ones(Nlink)))
>>> # frequency range
>>> fMHz = np.linspace(700,1900,120)
>>> pl = cost231(pBS,pMS,hroof,phir,wr,fMHz)
>>> im = plt.imshow(pl,extent=(0,100,0.7,1.9))
>>> cb = plt.colorbar()
>>> cb.set_label('Loss (dB)')
>>> plt.axis('tight')
>>> plt.xlabel('Frequency (GHz)')
>>> plt.ylabel('Link Number')
>>> plt.title('100 WI Path Loss realizations ')
>>> plt.show()
"""
hBS = pBS[2,:][:,np.newaxis]
hMS = pMS[2,:][:,np.newaxis]
wr = wr[:,np.newaxis]
hroof = hroof[:,np.newaxis]
phir = phir[:,np.newaxis]
fMHz = fMHz[np.newaxis,:]
dm = np.sqrt(np.sum((pBS-pMS)*(pBS-pMS),axis=0))[:,np.newaxis]
dkm = dm/1000.
Nlink = len(dm)
pl0 = 32.4 + 20*np.log10(dkm) + 20*np.log10(fMHz)
delta_base = hBS-hroof
u035 = np.where((phir>=0) & (phir<35))
u3555 = np.where((phir>=35) & (phir<55))
u5590 = np.where((phir>=55) & (phir<90))
plori = np.zeros(Nlink)[:,np.newaxis]
# path loss due to orientation w.r.t road
plori[u035] = -10+0.354*phir[u035]
plori[u3555] = 2.5+0.075*phir[u3555]
plori[u5590] = 4.0-0.114*(phir[u5590]-55)
# rooftop to street
plrts = -16.9-10*np.log10(wr)+10*np.log10(fMHz)+20*np.log10(hroof-hMS)+plori
uroofsupBS = np.where(hBS>hroof)
uroofinfBS = np.where(hBS<=hroof)
udistsup500 = np.where((hBS<=hroof)&(dkm>0.5))
udistinf500 = np.where((hBS<=hroof)&(dkm<0.5))
plbsh = np.zeros((Nlink,1))
plbsh[uroofsupBS] = -18*np.log10(1+delta_base[uroofsupBS])
ka = 54*np.ones((Nlink,1))
ka[udistsup500] = ka[udistsup500]-0.8*delta_base[udistsup500]
ka[udistinf500] = ka[udistinf500]-0.8*delta_base[udistinf500]*dkm[udistinf500]/0.5
kd = 18*np.ones((Nlink,1))
kd[uroofinfBS] = kd[uroofinfBS]-15*delta_base[uroofinfBS]/hroof[uroofinfBS]
if city=='medium':
kf = -4+0.7*(fMHz/925.-1)
else:
kf = -4+1.5*(fMHz/925.-1)
plmsd = plbsh+ka+kd*np.log10(dkm)+kf*np.log10(fMHz)-9*np.log10(wb)
pl = pl0
padd = plmsd + plrts
ulosspos = np.where(padd>0)[0]
pl[ulosspos]=pl[ulosspos]+padd[ulosspos]
if not dB:
pl = 10**(-pl/20.)
return(pl)
def cost259(pMS,pBS,fMHz,dB=True):
""" cost259 model
Parameters
----------
pMS : np.array (position of Mobile Station)
pBS : np.array (position of Base station)
fMHz : float
"""
dm = np.sqrt((pBS-pMS)*(pBS-pMS))
lmbd = 300/fMHz
    pl = 10*2.6*np.log10(dm)+20*np.log10(4*np.pi/lmbd)
    if not dB:
        pl = 10**(-pl/20.)
return(pl)
def hata(pMS,pBS,fGHz,hMS,hBS,typ):
""" Hata Path loss model
Parameters
----------
pMS : np.array
Mobile position (meters)
pBS : np.array
Base station position (meters)
fGHz : np.array
hMS : height mobile station (m)
hBS : height base station (m)
typ : 'small' or 'big'
Returns
-------
L : Attenuation (dB)
Examples
--------
>>> d = np.linspace(100,5000,120)
>>> hBS = 30
>>> hMS = 1.5
>>> fGHz = 0.9
>>> pMS = np.array([d,0,hMS])
>>> pBS = np.array([d,0,hBS])
>>> L = hata(pMS,pBS,fGHz,hMS,hBS,'small')
Notes
-----
This model is valid until 1.5GHz, for higher frequency see
COST231-Hata model
References
----------
OKUMURA (Y.), OHMORI (E.), KAWANO (T.)
et FUKUA (K.). Field strength and its varia-
bility in UHF and VHF land-mobile radio ser-
vice. Rev. Elec. Commun. Lab., vol. 16, n o 9,
1968.
HATA (M.). Empirical formula for propaga-
tion loss in land mobile radio services. IEEE
Trans. Veh. Technol., vol. 29, pp. 317-325,
Aug. 1980
"""
dm = np.sqrt((pBS-pMS)*(pBS-pMS))
if (typ=='small'):
CH = (1.1*np.log10(fGHz*1000)-0.7)*hMS-(1.56*np.log10(fGHz*1000)-0.8)
if (typ=='big'):
if fGHz<0.2:
CH = 8.29*(np.log10(1.54*hMS)**2)-1.1
else:# valid until 1.5GHz
CH = 3.2*(np.log10(11.75*hMS)**2)-4.97
L = 69.55+26.16*np.log10(fGHz*1000)-13.82*np.log10(hBS)+(44.9-6.55*np.log10(hBS))*np.log10(dm/1000.)-CH
return(L)
def cost2100(pMS,pBS,fGHz,nfloor=1,dB=True):
""" cost 2100 model
Parameters
----------
pMS :
pBS :
fGHz : float
nfloor : int
dB : boolean
"""
# distance (meters)
dm = np.sqrt((pBS-pMS)*(pBS-pMS))
    pl0 = 32.4+20*np.log10(dm)+20*np.log10(fGHz)
pld = nfloor*30
pl = pl0+pld
if not dB:
pl = 10**(-pl/20.)
return(pl)
def PL(fGHz,pts,p,n=2.0,dB=True,d0=1):
""" calculate Free Space Path Loss
Parameters
----------
fGHz : float
frequency (GHz)
pts : np.array (2xNp)
points
p : np.array (2x1) or (2xNp)
n : float
path loss exponent (default = 2)
dB : : boolean
return result in dB
Returns
-------
PL : np.array
path loss w.r.t distance and frequency
"""
shp = np.shape(p)
# assert(shp[0]==2)
D = np.sqrt(np.sum((pts-p)**2,axis=0))
# f x grid x ap
#PL = np.array([PL0(fGHz)])[:,np.newaxis] + 10*n*np.log10(D)[np.newaxis,:]
    PL = PL0(fGHz,R=d0)[:,np.newaxis] + 10*n*np.log10(D/d0)[np.newaxis,:]
if not dB:
PL=10**(-PL/10)
return(PL)
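# Usage sketch (illustrative coordinates): free-space loss from one access point
# at the origin to three observation points, for two frequencies; the result is
# a (Nf, Np) array of losses in dB.
def _demo_PL():
    fGHz = np.array([2.4, 5.0])
    p = np.array([[0.0], [0.0]])                          # (2,1) source
    pts = np.array([[1.0, 5.0, 10.0], [0.0, 0.0, 0.0]])   # (2,Np) grid points
    print(PL(fGHz, pts, p, n=2.0))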
def Losst(L,fGHz,p1,p2,dB=True):
""" calculate Losses between links p1 p2
Parameters
----------
L : Layout object
fGHz : np.array
frequency GHz
p1 : source points
(2 x Np1) array or (2,) array
p2 : observation point
(2 x Np2) array or (2,) array
dB : boolean
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> from pylayers.measures.mesuwb import *
>>> from pylayers.antprop.loss import *
>>> S = Simul()
>>> S.layout('WHERE1.lay')
>>> fGHz = 4
>>> Tx,Rx = ptw1()
>>> Lwo,Lwp,Edo,Edp = Losst(S.L,fGHz,Tx.T,Rx[1,0:2],dB=True)
>>> fig=plt.figure(figsize=(20,10))
>>> fig,ax = S.L.showGs(fig=fig)
>>> tit = plt.title('test Losst')
>>> sc2 = ax.scatter(Rx[1,0],Rx[1,1],s=20,marker='x',c='k')
>>> sc1 = ax.scatter(Tx[:,0],Tx[:,1],s=20,c=Lwo,linewidth=0)
>>> cb = plt.colorbar(sc1)
>>> cb.set_label('dB')
>>> plt.show()
See Also
--------
pylayers.antprop.coverage
pylayers.slab.Interface.losst
"""
if (type(fGHz)==float) | (type(fGHz)==int):
fGHz=np.array([fGHz],dtype=float)
sh1 = np.shape(p1)
sh2 = np.shape(p2)
if (len(sh1)>1) & (len(sh2)>1):
Nlink = max(sh1[1],sh2[1])
if (len(sh1)>1) & (len(sh2)<2):
Nlink = sh1[1]
if (len(sh1)<2) & (len(sh2)>1):
Nlink = sh2[1]
if (len(sh1)<2) & (len(sh2)<2):
Nlink = 1
    # determine incidence angles on segments crossing the p1-p2 link
#data = L.angleonlink(p1,p2)
data = L.angleonlink3(p1,p2)
# as many slabs as segments and subsegments
us = data['s']
slabs = np.array([ L.Gs.node[x]['name'] for x in us ])
#slabs = L.sla[us]
check = np.where(slabs=='')
#
# As segment numbering is not necessarily contiguous
# there exist void string '' in slabs
cslab = list(np.unique(slabs))
if '' in cslab:
cslab.remove('')
if 'AIR' in cslab:
cslab.remove('AIR')
if '_AIR' in cslab:
cslab.remove('_AIR')
LossWallo = np.zeros((len(fGHz),Nlink))
LossWallp = np.zeros((len(fGHz),Nlink))
EdWallo = np.zeros((len(fGHz),Nlink))
EdWallp = np.zeros((len(fGHz),Nlink))
for slname in cslab:
# u index of slabs of name slname
# data['a'][u] angle
# data['s'][u] segment number including subsegment
u = np.nonzero(np.array(slabs)==slname)[0]
#
# calculate Loss for slab slname
#
lko,lkp = L.sl[slname].losst(fGHz,data['a'][u])
#
# calculate Excess delay for slab slname
#
do , dp = L.sl[slname].excess_grdelay(theta=data['a'][u])
# data['i'][u] links number
indexu = data['i'][u]
# reduce to involved links
involved_links, indices = np.unique(indexu,return_index=True)
indicep = np.hstack((indices[1:],np.array([len(indexu)])))
# range on involved links
irange = np.arange(len(involved_links))
#
# sum contribution of slab of a same link
#
Wallo = np.array([ np.sum(lko[:,indices[x]:indicep[x]],axis=1) for x in irange ] ).T
Wallp = np.array([ np.sum(lkp[:,indices[x]:indicep[x]],axis=1) for x in irange ] ).T
Edo = np.array([np.sum(do[indices[x]:indicep[x]]) for x in irange]).T
Edp = np.array([np.sum(dp[indices[x]:indicep[x]]) for x in irange]).T
LossWallo[:,involved_links] = LossWallo[:,involved_links] + Wallo
LossWallp[:,involved_links] = LossWallp[:,involved_links] + Wallp
EdWallo[:,involved_links] = EdWallo[:,involved_links] + Edo
EdWallp[:,involved_links] = EdWallp[:,involved_links] + Edp
# Managing Ceil / Floor transmission
# check crossing ceil
if (p1[2,:]> L.zceil).any() or (p2[2,:]> L.zceil).any():
        # WARNING : this test should be done individually
if (p1[2]>p2[2]).all():
v0 = p1
v1 = p2
else:
v0 = p2
v1 = p1
uu = v0 - v1
# 1 x N
nu = np.sqrt(np.sum(uu * uu, axis=0))
# 3 x N
un = uu / nu[np.newaxis, :]
dotp = np.einsum('ij,i->j',un,np.array([0,0,1]))
alphas = np.arccos(dotp)
#
# calculate Loss for slab CEIL
#
lkco,lkcp = L.sl['CEIL'].losst(fGHz,alphas)
#
# calculate Excess delay for slab CEIL
#
dco , dcp = L.sl['CEIL'].excess_grdelay(theta=alphas)
LossWallo = LossWallo + lkco
LossWallp = LossWallp + lkcp
EdWallo = EdWallo + dco
EdWallp = EdWallp + dcp
# check crossing floor
if (p1[2,:]< L.zfloor).any() or (p2[2,:]< L.zfloor).any():
        # WARNING : this test should be done individually
if (p1[2]>p2[2]).all():
v0 = p1
v1 = p2
else:
v0 = p2
v1 = p1
uu = v0 - v1
# 1 x N
nu = np.sqrt(np.sum(uu * uu, axis=0))
# 3 x N
un = uu / nu[np.newaxis, :]
dotp = np.einsum('ij,i->j',un,np.array([0,0,1]))
alphas = np.arccos(dotp)
#
        # calculate Loss for slab FLOOR
#
lkfo,lkfp = L.sl['FLOOR'].losst(fGHz,alphas)
#
        # calculate Excess delay for slab FLOOR
#
dfo , dfp = L.sl['FLOOR'].excess_grdelay(theta=alphas)
LossWallo = LossWallo + lkfo
LossWallp = LossWallp + lkfp
EdWallo = EdWallo + dfo
EdWallp = EdWallp + dfp
if not dB:
LossWallo = 10**(-LossWallo/10)
LossWallp = 10**(-LossWallp/10)
return(LossWallo,LossWallp,EdWallo,EdWallp)
def gaspl(d,fGHz,T,PhPa,wvden):
""" attenuation due to atmospheric gases
Parameters
----------
d : np.array
range (meters)
fGHz : np.array
frequency (GHz)
T : float
        Temperature in degrees Celsius
PhPa : float
Pressure in hPa
wvden : float
Water vapor density (g/m**3)
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> T = 15
>>> PhPa = 1013
>>> wvden = 7.5
>>> d = 1000
>>> fGHz = np.linspace(1,1000,100)
>>> L = gaspl(d,fGHz,T,PhPa,wvden)
>>> plt.plot(fGHz,L)
Notes
-----
    This function implements ITU-R Recommendation P.676-10.
"""
affO2=np.array([
[50.474214,0.975,9.651,6.690,0.0,2.566,6.850],
[50.987745,2.529,8.653,7.170,0.0,2.246,6.800],
[51.503360,6.193,7.709,7.640,0.0,1.947,6.729],
[52.021429,14.320,6.819,8.110,0.0,1.667,6.640],
[52.542418,31.240,5.983,8.580,0.0,1.388,6.526],
[53.066934,64.290,5.201,9.060,0.0,1.349,6.206],
[53.595775,124.600,4.474,9.550,0.0,2.227,5.085],
[54.130025,227.300,3.800,9.960,0.0,3.170,3.750],
[54.671180,389.700,3.182,10.370,0.0,3.558,2.654],
[55.221384,627.100,2.618,10.890,0.0,2.560,2.952],
[55.783815,945.300,2.109,11.340,0.0,-1.172,6.135],
[56.264774,543.400,0.014,17.030,0.0,3.525,-0.978],
[56.363399,1331.800,1.654,11.890,0.0,-2.378,6.547],
[56.968211,1746.600,1.255,12.230,0.0,-3.545,6.451],
[57.612486,2120.100,0.910,12.620,0.0,-5.416,6.056],
[58.323877,2363.700,0.621,12.950,0.0,-1.932,0.436],
[58.446588,1442.100,0.083,14.910,0.0,6.768,-1.273],
[59.164204,2379.900,0.387,13.530,0.0,-6.561,2.309],
[59.590983,2090.700,0.207,14.080,0.0,6.957,-0.776],
[60.306056,2103.400,0.207,14.150,0.0,-6.395,0.699],
[60.434778,2438.000,0.386,13.390,0.0,6.342,-2.825],
[61.150562,2479.500,0.621,12.920,0.0,1.014,-0.584],
[61.800158,2275.900,0.910,12.630,0.0,5.014,-6.619],
[62.411220,1915.400,1.255,12.170,0.0,3.029,-6.759],
[62.486253,1503.000,0.083,15.130,0.0,-4.499,0.844],
[62.997984,1490.200,1.654,11.740,0.0,1.856,-6.675],
[63.568526,1078.000,2.108,11.340,0.0,0.658,-6.139],
[64.127775,728.700,2.617,10.880,0.0,-3.036,-2.895],
[64.678910,461.300,3.181,10.380,0.0,-3.968,-2.590],
[65.224078,274.000,3.800,9.960,0.0,-3.528,-3.680],
[65.764779,153.000,4.473,9.550,0.0,-2.548,-5.002],
[66.302096,80.400,5.200,9.060,0.0,-1.660,-6.091],
[66.836834,39.800,5.982,8.580,0.0,-1.680,-6.393],
[67.369601,18.560,6.818,8.110,0.0,-1.956,-6.475],
[67.900868,8.172,7.708,7.640,0.0,-2.216,-6.545],
[68.431006,3.397,8.652,7.170,0.0,-2.492,-6.600],
[68.960312,1.334,9.650,6.690,0.0,-2.773,-6.650],
[118.750334,940.300,0.010,16.640,0.0,-0.439,0.079],
[368.498246,67.400,0.048,16.400,0.0,0.000,0.000],
[424.763020,637.700,0.044,16.400,0.0,0.000,0.000],
[487.249273,237.400,0.049,16.000,0.0,0.000,0.000],
[715.392902,98.100,0.145,16.000,0.0,0.000,0.000],
[773.839490,572.300,0.141,16.200,0.0,0.000,0.000],
[834.145546,183.100,0.145,14.700,0.0,0.000,0.000]])
## spectroscopic data for H20
## f0 b1 b2 b3 b4 b5 b6
affH2O=np.array([
[22.235080,0.1130,2.143,28.11,0.69,4.800,1.00],
[67.803960,0.0012,8.735,28.58,0.69,4.930,0.82],
[119.995940,0.0008,8.356,29.48,0.70,4.780,0.79],
[183.310091,2.4200,0.668,30.50,0.64,5.300,0.85,],
[321.225644,0.0483,6.181,23.03,0.67,4.690,0.54],
[325.152919,1.4990,1.540,27.83,0.68,4.850,0.74],
[336.222601,0.0011,9.829,26.93,0.69,4.740,0.61],
[380.197372,11.5200,1.048,28.73,0.54,5.380,0.89,],
[390.134508,0.0046,7.350,21.52,0.63,4.810,0.55],
[437.346667,0.0650,5.050,18.45,0.60,4.230,0.48,],
[439.150812,0.9218,3.596,21.00,0.63,4.290,0.52,],
[443.018295,0.1976,5.050,18.60,0.60,4.230,0.50],
[448.001075,10.3200,1.405,26.32,0.66,4.840,0.67],
[470.888947,0.3297,3.599,21.52,0.66,4.570,0.65,],
[474.689127,1.2620,2.381,23.55,0.65,4.650,0.64],
[488.491133,0.2520,2.853,26.02,0.69,5.040,0.72],
[503.568532,0.0390,6.733,16.12,0.61,3.980,0.43],
[504.482692,0.0130,6.733,16.12,0.61,4.010,0.45],
[547.676440,9.7010,0.114,26.00,0.70,4.500,1.00],
[552.020960,14.7700,0.114,26.00,0.70,4.500,1.00],
[556.936002,487.4000,0.159,32.10,0.69,4.110,1.00],
[620.700807,5.0120,2.200,24.38,0.71,4.680,0.68],
[645.866155,0.0713,8.580,18.00,0.60,4.000,0.50],
[658.005280,0.3022,7.820,32.10,0.69,4.140,1.00],
[752.033227,239.6000,0.396,30.60,0.68,4.090,0.84],
[841.053973,0.0140,8.180,15.90,0.33,5.760,0.45],
[859.962313,0.1472,7.989,30.60,0.68,4.090,0.84],
[899.306675,0.0605,7.917,29.85,0.68,4.530,0.90],
[902.616173,0.0426,8.432,28.65,0.70,5.100,0.95],
[906.207325,0.1876,5.111,24.08,0.70,4.700,0.53],
[916.171582,8.3400,1.442,26.70,0.70,4.780,0.78],
[923.118427,0.0869,10.220,29.00,0.70,5.000,0.80],
[970.315022,8.9720,1.920,25.50,0.64,4.940,0.67],
[987.926764,132.1000,0.258,29.85,0.68,4.550,0.90],
    [1780.000000,22300.0000,0.952,176.20,0.50,30.500,5.000]])
dkm = d/1000.
TK = T + 273.15
theta = 300./TK #3
fO2 = affO2[:,0]
a1 = affO2[:,1]
a2 = affO2[:,2]
a3 = affO2[:,3]
a4 = affO2[:,4]
a5 = affO2[:,5]
a6 = affO2[:,6]
fH2O = affH2O[:,0]
b1 = affH2O[:,1]
b2 = affH2O[:,2]
b3 = affH2O[:,3]
b4 = affH2O[:,4]
b5 = affH2O[:,5]
b6 = affH2O[:,6]
e = wvden*TK/216.7 # 4
SO2 = a1*1e-7*PhPa*(theta**3)*np.exp(a2*(1-theta)) # 3
DO2 = a3*1e-4*(PhPa*(theta**(0.8-a4))+1.1*e*theta) # 6a
SH2O = b1*1e-1*e*(theta**(3.5))*np.exp(b2*(1-theta)) # 3
DH2O = b3*1e-4*(PhPa*theta**b4+b5*e*theta**b6) # 6a
DO2_m = np.sqrt(DO2**2+2.25e-6) # 6b
DH2O_m = 0.535*DH2O+np.sqrt(0.217*DH2O**2+(2.1316*1e-12*fH2O**2)/theta)
deltaO2 = (a5+a6*theta)*1e-4*(PhPa+e)*theta**(0.8)
#
# O2
#
uO2 = fO2[:,None]-fGHz[None,:]
vO2 = fO2[:,None]+fGHz[None,:]
n1O2 = DO2_m[:,None]-deltaO2[:,None]*uO2
n2O2 = DO2_m[:,None]-deltaO2[:,None]*vO2
d1O2 = uO2**2 + DO2_m[:,None]**2
d2O2 = vO2**2 + DO2_m[:,None]**2
FO2 = (fGHz[None,:]/fO2[:,None])*(n1O2/d1O2+n2O2/d2O2)
UO2 = SO2[:,None]*FO2
#
# H2O
#
uH2O = fH2O[:,None]-fGHz[None,:]
vH2O = fH2O[:,None]+fGHz[None,:]
nH2O = DH2O_m[:,None]
d1H2O = uH2O**2 + DH2O_m[:,None]**2
d2H2O = vH2O**2 + DH2O_m[:,None]**2
FH2O = (fGHz[None,:]/fH2O[:,None])*(nH2O/d1H2O+nH2O/d2H2O)
UH2O = SH2O[:,None]*FH2O
# Nsec (8)
dD = 5.6e-4*(PhPa+e)*theta**(0.8)
t1 = 6.14e-5/(dD*(1.+(fGHz/dD)**2))
t2 = 1.4e-12*PhPa*(theta**(1.5))/(1+1.9e-5*fGHz**(1.5))
Nsec = fGHz*PhPa*(theta**2)*(t1+t2) # 9
ulow = np.where(fGHz<118.750343)[0]
uhigh = np.where(fGHz>=118.750343)[0]
UO2low = UO2[:,ulow]
UO2high = UO2[:,uhigh]
SO2low = np.sum(UO2low,axis=0)
SO2high = np.sum(UO2high[38:,:],axis=0)
sSO2 = np.hstack((SO2low,SO2high))
Npp = sSO2 + np.sum(UH2O,axis=0)+Nsec
Npp = np.sum(UO2,axis=0) + np.sum(UH2O,axis=0)+Nsec
gamma = 0.1820*fGHz*Npp
LgasdB = gamma*dkm
return(LgasdB)
def Loss0(S,rx,ry,f,p):
""" calculate Loss through Layers for theta=0 deg
Parameters
----------
S : Simulation object
rx : extremity of link
ry : extremity of link
fGHz : float
frequency GHz
p :
"""
Nx = len(rx)
Ny = len(ry)
Lw = np.zeros((Nx,Ny))
    print(np.shape(Lw))
i = 0
for x in rx:
j = 0
for y in ry:
Loss = 0
pxy = np.array([x,y])
            seglist,theta = S.L.angleonlinkold(p,pxy)
for k in seglist:
                name = S.L.name[k]
                lk = S.L.sl[name].loss0(f)
Loss = Loss + lk[0]
Lw[i,j] = Loss
j = j+1
i = i+1
return(Lw)
def Loss_diff(u):
""" calculate Path Loss of the diffraction
"""
if u < -0.7:
Ld = 0
elif u > 1.5:
Ld = 13 + 20*np.log10(u)
else:
Ld = 6.9 + 20*np.log10(np.sqrt((u-0.1)**2+1)+u-0.1)
return(Ld)
def calnu(h,d1,d2,fGHz):
r""" Calculate the diffraction Fresnel parameter
Parameters
----------
h : signed height w.r.t LOS (meter)
d1 : distance 1 (meter)
d2 : distance 2 (meter)
fGHz : frequency GHz
Notes
-----
.. math:: \nu = h \sqrt{\frac{2}{\lambda} \frac{d_1+d_2}{d_1 d_2}}
"""
ld = 0.3/fGHz
nu = h*np.sqrt(2*(d1+d2)/(ld*d1*d2))
return(nu)
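# Worked example (numbers chosen for illustration): a 5 m obstruction above the
# LOS line, 1 km from each terminal at 2.4 GHz, gives nu ~ 0.9 and about 13 dB
# of knife-edge diffraction loss.
def _demo_knife_edge():
    nu = calnu(h=5.0, d1=1000.0, d2=1000.0, fGHz=2.4)
    print('nu = %.2f -> Ld = %.1f dB' % (nu, Loss_diff(nu)))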
def route(X, Y, Z, Ha, Hb, fGHz, K, method='deygout'):
""" diffraction loss along a route
Parameters
----------
X : np.array (Nphi,Nr)
cartesian coordinate grid
Y : np.array (Nphi,Nr)
cartesian coordinate grid
Z : np.array (Nphi,Nr)
height (meters)
Ha : float
Hb : float
fGHz : np.array (,Nf)
frequency in GHz
method : 'deygout' | 'bullington'
Returns
-------
L : Losses (dB)
"""
Nphi, Nr = Z.shape
if (type(fGHz) == float):
fGHz = np.array([fGHz])
Nf = len(fGHz)
L = np.zeros((Nphi, Nf))
L0 = np.zeros(Nf)
    # loop over azimuth
for ip in range(Nphi):
x = X[ip, :]
y = Y[ip, :]
z = Z[ip, :]
d = np.sqrt((x-x[0])**2+(y-y[0])**2)
# effect of refraction in equivalent earth curvature
dh = d*(d[::-1])/(2*K*6375e3)
z = z + dh
LOS = 32.4 + 20*np.log10(fGHz) + 20*np.log10(d[-1])
z[0] = z[0] + Ha
z[-1] = z[-1] + Hb
if method == 'deygout':
LDiff = deygout(d, z, fGHz, L0, 0)
if method == 'bullington':
LDiff, deq, heq = bullington(d, z, fGHz)
L[ip, :] = LDiff+LOS
return(L)
def cover(X, Y, Z, Ha, Hb, fGHz, K, method='deygout'):
""" outdoor coverage on a region
Parameters
----------
X : np.array (Nphi,Nr)
cartesian coordinate grid
Y : np.array (Nphi,Nr)
cartesian coordinate grid
Z : np.array (Nphi,Nr)
height (meters)
Ha : float
Hb : float
fGHz : np.array (,Nf)
frequency in GHz
method : 'deygout' | 'bullington'
Returns
-------
L : Losses (dB)
"""
Nphi, Nr = Z.shape
if (type(fGHz) == float):
fGHz = np.array([fGHz])
Nf = len(fGHz)
L = np.zeros((Nphi, Nr, Nf))
L0 = np.zeros(Nf)
    # loop over azimuth
for ip in range(Nphi):
# loop over range
# il : 2 ... Nr-2
# uk : 0 ....Nr-1
for il in np.arange(2, Nr-1):
uk = np.arange(0, il+1)
z = np.empty(len(uk))
x = X[ip, uk]
y = Y[ip, uk]
z[uk] = Z[ip, uk]
d = np.sqrt((x-x[0])**2+(y-y[0])**2)
# effect of refraction in equivalent earth curvature
dh = d*(d[::-1])/(2*K*6375e3)
z = z + dh
LOS = 32.4 + 20*np.log10(fGHz) + 20*np.log10(d[-1])
z[0] = z[0] + Ha
z[-1] = z[-1] + Hb
if method == 'deygout':
LDiff = deygout(d, z, fGHz, L0, 0)
if method == 'bullington':
LDiff, deq, heq = bullington(d, z, fGHz)
L[ip, il, :] = LDiff[None, :]+LOS[None,:]
return(L)
def deygout(d, height, fGHz, L, depth):
""" Deygout attenuation
Parameters
----------
d : np.array (,N)
horizontal distance
height : np.array (,N)
height profile
fGHz : np.array (,Nf)
frequency GHz
L : np.array (,Nf)
Additional Loss
depth : recursive depth
Notes
-----
This function is recursive
"""
lmbda = 0.3/fGHz
L0 = np.zeros(len(fGHz))
depth = depth+1
N = len(height)
if depth < 3:
if N > 3:
u = np.arange(N)/(N-1.0) # float
# l : straight line between termination (LOS)
l = (height[0])*(1-u)+(height[-1])*u
# h excludes termination points
h = height[1:-1] - l[1:-1]
# Fresnel parameter (engagement)
nu = h[:, None] * np.sqrt((2/lmbda[None, :]) *
(1/d[1:-1, None]+1/(d[-1]-d[1:-1, None])))
imax = np.unique(np.nanargmax(nu, axis=0))[0]
numax = nu[imax, :]
else:
numax = -10*np.ones(len(fGHz))
if (numax > -0.78).any():
w = numax - 0.1
L = L + np.maximum(6.9 + 20*np.log10(np.sqrt(w**2+1)+w), 0)
# left link
height1 = height[0:imax+2]
d1 = d[0:imax+2]
Ll = deygout(d1, height1, fGHz, L0, depth)
# right link
height2 = height[imax+1:]
d2 = d[imax+1:]
Lr = deygout(d2, height2, fGHz, L0, depth)
# add losses
L = L + Lr + Ll
return(L)
def bullington(d, height, fGHz):
""" edges attenuation with Bullington method
Parameters
----------
d : np.array
height : np.array
        antenna heights are included in height[0] and height[-1]
fGHz : np.array
Returns
-------
L : np.array
total loss
"""
def recl(d, height):
""" determine left interception point
Parameters
----------
d : np.array
height : np.array
"""
N = len(height)
u = np.arange(N)/(N-1.)
# l : straight line between termination (LOS)
l = height[0]*(1-u)+(height[-1])*u
h = height - l
# imax : index of the maximum height offset
imax = np.argmax(h)
if imax>0:
# hmax : maximum height offset
hmax = h[imax]
# parameterization from 0 to imax
ul = np.arange(imax)/(imax-1.)
# straight line
dhl = h[0]*(1-ul) + hmax*ul
# el : offset if <0 split again
el = dhl - h[0:imax]
if np.min(el) < 0:
u, v = recl(d[0:imax+1], height[0:imax+1])
else:
u = d[0:imax+1]
v = h[0:imax+1]
else:
u = d[0:1]
            v = h[0:1]
return(u, v)
#if min(er)<0:
# u,v = rec(d[imax-1:-1],dhl)
#else:
#er = dhr - h[imax-1:-1]
def recr(d, height):
""" determine the right interception point
"""
N = len(height)
u = np.arange(N)/(N-1.)
l = height[0]*(1-u)+(height[-1])*u
h = height - l
imax = np.argmax(h)
hmax = h[imax]
ur = np.arange(N-imax)/(N-imax-1.)
dhr = hmax*(1-ur) + h[-1]*ur
er = dhr - h[imax:]
if np.min(er) < 0:
u, v = recr(d[imax:],h[imax:])
else:
u = d[imax:]
v = h[imax:]
return(u,v)
#if min(er)<0:
# u,v = rec(d[imax-1:-1],dhl)
#else:
#er = dhr - h[imax-1:-1]
lmbda = 0.3/fGHz
u = np.arange(len(height))/(len(height)-1.)
l = (height[0])*(1-u)+(height[-1])*u
h = height - l
if (h>0).any():
ul, vl = recl(d, height)
ur, vr = recr(d, height)
idtx = len(ul)
idrx = len(h) - len(ur)
dtx = d[idtx]
drx = d[-1]-d[idrx]
htx = h[idtx-1]
hrx = h[idrx]
deq = (dtx*hrx)*d[-1]/(drx*htx+dtx*hrx)
heq = deq*(htx/dtx)
else:
heq = -np.min(np.abs(h[1:-1]))
ieq = np.where(h==heq)[0][0]
deq = d[ieq]
nu = heq*np.sqrt((2/lmbda)*(1/deq+1/(d[-1]-deq)))
w = nu - 0.1
L = np.maximum(6.9 + 20*np.log10(np.sqrt(w**2+1)+w), 0)
return(L, deq, heq)
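# Comparison sketch (synthetic terrain, illustrative only): a 50 m triangular
# hill at mid-path between two 30 m antennas, 10 km apart at 1 GHz. Deygout
# recursively stacks the dominant edges; Bullington collapses the profile to a
# single equivalent edge at (deq, heq).
def _demo_diffraction_methods():
    fGHz = np.array([1.0])
    d = np.linspace(0.0, 10000.0, 101)
    height = np.maximum(0.0, 50.0 - np.abs(d - 5000.0) / 50.0)
    height[0] += 30.0     # Tx antenna height
    height[-1] += 30.0    # Rx antenna height
    Ld = deygout(d, height, fGHz, np.zeros(1), 0)
    Lb, deq, heq = bullington(d, height, fGHz)
    print('Deygout   : %.1f dB' % Ld[0])
    print('Bullington: %.1f dB (deq=%.0f m, heq=%.1f m)' % (Lb[0], deq, heq))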
def two_rays_flatearth(fGHz, **kwargs):
"""
Parameters
----------
p0 : transmitter position
(3 x Np1) array or (2,) array
p1 : receiver position
(3 x Np2) array or (2,) array
OR :
d : distance between Tx and Rx
(Np1,)
ht : Tx height
hr : Rx height
(Np1)
GtdB : float (0)
Transmitter Antenna Gain (dB)
GrdB : float(0)
Receiver Antenna Gain (dB)
fGHz : float (2.4)
frequency (GHz)
gamma : complex (-1.+0.j)
Reflexion coeff
dB : boolean (True)
        return result in dB
Returns
-------
P :
received power
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.loss import *
>>> NPT=10000
>>> x=np.array([0,0,8])
>>> x=x.reshape(3,1)
>>> y = np.ones((3,NPT))
>>> y[0,:]=0
>>> y[1,:]=np.arange(NPT)
>>> y[2,:]=2
>>> g0=1
>>> g1=1
>>> fGHz=2.4
>>> PL2R=two_rays_flatearth(p0=x,p1=y,fGHz=fGHz,GtdB=g0,GrdB=g1)
>>> PL1R = PL(fGHz,x,y,2)
>>> plt.semilogx(PL2R,label='two-ray model')
>>> plt.semilogx(-PL1R[0,:],label='one slope model')
>>> plt.axis([10,NPT,-150,-50])
>>> plt.title('Loss 2-rays model vs one slope model')
>>> plt.xlabel('distance (m)')
>>> plt.ylabel('Loss Pr/Pt (dB)')
>>> plt.legend()
>>> plt.show()
>>> d=np.arange(1,1000)
>>> PL2Rd = two_rays_flatearth(d=d,ht=np.array([5]),hr=np.array([10]),fGHz=fGHz,GtdB=g0,GrdB=g1)
>>> plt.semilogx(PL2Rd,label='two-ray model')
>>> plt.semilogx(-PL1R[0,:],label='one slope model')
>>> plt.axis([10,NPT,-150,-50])
>>> plt.title('Loss 2-rays model vs one slope model')
>>> plt.xlabel('distance (m)')
>>> plt.ylabel('Loss Pr/Pt (dB)')
>>> plt.legend()
>>> plt.show()
References
----------
https://en.wikipedia.org/wiki/Two-ray_ground-reflection_model#As_a_case_of_log_distance_path_loss_model
http://morse.colorado.edu/~tlen5510/text/classwebch3.html#x15-590003.3.3
"""
defaults = { 'p0':np.array((0,0,10)),
'p1':np.array((0,10,10)),
'd':[],
'ht':10,
'hr':10,
'GtdB':0.,
'GrdB':0.,
'gamma': -1.+0.j,
'pol':'v',
'eps' :[],
'sig':0.,
'dB':True
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
GtdB=kwargs.pop('GtdB')
GrdB=kwargs.pop('GrdB')
Gt = 10**((1.*GtdB)/10.)
Gr = 10**((1.*GrdB)/10.)
gamma=kwargs.pop('gamma')
pol=kwargs.pop('pol')
eps=kwargs.pop('eps')
sig=kwargs.pop('sig')
if kwargs['d'] == []:
p0=kwargs['p0']
p1=kwargs['p1']
assert p0.shape[0] == 3, 'p0 is not 3D'
assert p1.shape[0] == 3, 'p1 is not 3D'
if len(p0.shape) == 1:
p0=p0.reshape(p0.shape[0],1)
if len(p1.shape) == 1:
p1=p1.reshape(p1.shape[0],1)
p0=1.*p0
p1=1.*p1
ht = p0[2,:]
hr = p1[2,:]
dloss = np.sqrt(np.sum((p0-p1)**2,axis=0)) #l0
else:
dloss=kwargs['d']
ht=kwargs['ht']
hr=kwargs['hr']
d0 = np.sqrt( dloss**2 - 1.*(ht-hr)**2 ) # d0
dref = np.sqrt(d0**2+1.*(ht+hr)**2) #l0'
if eps != []:
psy = np.arcsin((ht+hr)/dref)
er = eps - 60.j*sig*0.3/fGHz
if pol == 'v':
Z = (1./er)* np.sqrt(er-np.cos(psy)**2)
elif pol == 'h':
Z = np.sqrt(er-np.cos(psy)**2)
gamma = (np.sin(psy)-Z)/((np.sin(psy)+Z))
deltad = dref-dloss
deltaphi = (2*np.pi*fGHz*deltad)/0.3
    E= (0.3/(4*np.pi*fGHz)) * (np.sqrt(Gt*Gr)/dloss + gamma * np.sqrt(Gt*Gr)*(np.exp(-1.j*deltaphi))/dref)
P = abs(E)**2
# import ipdb
# ipdb.set_trace()
if kwargs['dB'] :
return 10*np.log10(P)
else:
return P
def lossref_compute(P,h0,h1,k=4/3.) :
"""
compute loss and reflection rays on curved earth
Parameters
----------
P : float |list
if len(P) == 1 => P is a distance
if len(P) == 4 => P is a list of [lon0,lat0,lon1,lat1]
where :
lat0 : float |string
latitude first point (decimal |deg min sec Direction)
lat1 : float |string
latitude second point (decimal |deg min sec Direction)
lon0 : float |string
longitude first point (decimal |deg min sec Direction)
lon1 : float |string
longitude second point (decimal |deg min sec Direction)
h0 : float:
height of 1st point
h1 : float:
height of 2nd point
k : electromagnetic earth factor
Returns
-------
dloss : float
length of direct path (meter)
dref : float
length of reflective path (meter)
psy : float
Reflection angle
References
----------
    Mahafza, B. R., Radar systems analysis and design using MATLAB, Third edition. Boca Raton; London: CRC/Taylor & Francis, chapter 8, 2013.
"""
if isinstance(P,float) or isinstance(P,int) :
#P is a distance
r=P
mode = 'dist'
elif isinstance(P,np.ndarray) or isinstance(P,list):
if len(P) == 1:
#P is a distance
r=P
mode = 'dist'
elif len(P) == 4:
#P is a lonlat
lat0=P[0]
lon0=P[1]
lat1=P[2]
            lon1=P[3]
mode = 'lonlat'
else :
            raise AttributeError('P must be a list [lat0,lon0,lat1,lon1] or a distance')
else :
raise AttributeError('Invalid P format ( list |ndarray )')
# if h0<h1:
# h1,h0 = h0,h1
r0 = 6371.e3 # earth radius
re = k*r0 # telecom earth radius
if mode == 'lonlat':
        # r = curvilinear (geodesic) distance between TX and RX
r = gu.distance_on_earth(lat0, lon0, lat1, lon1)
else :
r=P
r=1.*r
# import ipdb
# ipdb.set_trace()
p = 2/(np.sqrt(3))*np.sqrt(re*(h0+h1)+(r**2/4.)) #eq 8.45
eps = np.arcsin(2*re*r*(h1-h0)/p**3) # eq 8.46
#distance of reflection on curved earth
r1 = r/2 - p*np.sin(eps/3) #eq 8.44
r2 = r -r1
phi1 = r1/re #8.47
phi2 = r2/re # 8.48
R1 = np.sqrt(h0**2+4*re*(re+h0)*(np.sin(phi1/2))**2) # 8.51
R2 = np.sqrt(h1**2+4*re*(re+h1)*(np.sin(phi2/2))**2) #8.52
Rd = np.sqrt((h1-h0)**2+4*(re+h1)*(re+h0)*np.sin((phi1+phi2)/2.)**2) # 8.53
# tangente angle on earth
psy = np.arcsin((h1/R1)-R1/(2.*re)) #eq 8.55
deltaR = 4*R1*R2*np.sin(psy)**2/(R1+R2+Rd)
dloss = Rd
dref = R1+R2
return psy,dloss,dref
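# Usage sketch (illustrative geometry): reflection geometry of a 30 km link
# with antennas at 30 m and 100 m over a 4/3 earth; the reflected path is
# slightly longer than the direct one and psy stays small.
def _demo_lossref():
    psy, dloss, dref = lossref_compute(30000.0, 30.0, 100.0)
    print('psy = %.4f rad, dloss = %.1f m, dref = %.1f m' % (psy, dloss, dref))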
def two_rays_curvedearthold(P,h0,h1,fGHz=2.4,**kwargs):
"""
Parameters
----------
P : float |list
if len(P) == 1 => P is a distance
if len(P) == 4 => P is a list of [lon0,lat0,lon1,lat1]
where :
lat0 : float |string
latitude first point (decimal |deg min sec Direction)
lat1 : float |string
latitude second point (decimal |deg min sec Direction)
lon0 : float |string
longitude first point (decimal |deg min sec Direction)
lon1 : float |string
longitude second point (decimal |deg min sec Direction)
h0 : float:
height of 1st point
h1 : float:
height of 2nd point
fGHz : float
frequency (GHz)
k : float
electromagnetic earth factor
GtdB : float
Transmitter Antenna Gain (dB)
GrdB : float
Receiver Antenna Gain (dB)
gamma : complex (-1.+0.j)
Reflexion coeff if eps and sig are not precised
'pol': string ('v')
polarization ('v'|'h')
'eps' : float ([])
lossless relative permittivity [],
'sig': float (0.)
conductivity
dB : boolean (True)
return result in dB
Returns
-------
P :
received power
Examples
--------
.. plot::
:include-source:
>>> from pylayers.antprop.loss import *
>>> import matplotlib.pyplot as plt
>>> fGHz=2.4
>>> p0=np.array(([0,0,20]))
>>> p1=np.array(([0,1,20]))
>>> p0=p0.reshape(3,1)
>>> p1=p1.reshape(3,1)
>>> TRF = [] #Two Ray model on flat earth
>>> TRC = [] #Two Ray model on curved earth
>>> PLoss=[]
    >>> for d in np.arange(1,10000,1):
    ...     p1[1,:]=d
    ...     TRF.append(two_rays_flatearth(fGHz=fGHz,p0=p0[:,0],p1=p1[:,0],GtdB=0.,GrdB=0.))
    ...     TRC.append(two_rays_curvedearth(d,p0[2,:],p1[2,:],fGHz))
    ...     PLoss.append(PL(fGHz, p0[:,0],p1[:,0], n=2.0, dB=True, d0=np.array([1])))
>>> PLoss=np.array(PLoss)[:,0,0]
>>> plt.semilogx(TRF,label='two-rays model flat earth')
>>> plt.semilogx(TRC,label='two-rays model curved earth')
>>> plt.semilogx(-PLoss,label='Path Loss')
>>> plt.legend()
>>> plt.show()
"""
defaults = { 'GtdB':0.,
'GrdB':0.,
'k':4/3.,
'gamma': -1.+0.j,
'pol':'v',
'eps' :[],
'sig':0.,
'mode':'PL',
'dB':True
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
GtdB=kwargs.pop('GtdB')
GrdB=kwargs.pop('GrdB')
Gt = 10**((1.*GtdB)/10.)
Gr = 10**((1.*GrdB)/10.)
k=kwargs.pop('k')
gamma=kwargs.pop('gamma')
pol=kwargs.pop('pol')
eps=kwargs.pop('eps')
sig=kwargs.pop('sig')
h0=1.*h0
h1=1.*h1
psy,dloss,dref = lossref_compute(P,h0,h1,k)
if eps != []:
er = eps - 60.j*sig*0.3/fGHz
if pol == 'v':
            Z = (1./er)* np.sqrt(er-np.cos(psy)**2)
        elif pol == 'h':
            Z = np.sqrt(er-np.cos(psy)**2)
        gamma = (np.sin(psy)-Z)/((np.sin(psy)+Z))
"""
This module consists of some helper functions for creating neuronal networks.
"""
import numpy as np
def poisson_homogenous(lam, timesteps):
"""
Generate a poisson spike train for a single neuron
using a homogenous poisson distribution.
.. image:: _images/homogenous_plot.png
:alt: Homogenous Poisson spike train
:width: 400px
:Example:
The spike train of the image above was generated by following function:
>>> poisson_homogenous(0.4, 200)
:param lam: lambda value
:type lam: Float
:param timesteps: total length of spike train
:type timesteps: Int
"""
size = (1, timesteps)
spiketrain = np.random.poisson(lam=lam, size=size)
spiketrain = np.array(spiketrain, dtype=bool)
return spiketrain
def poisson_inhomogenous(lambdas, timesteps):
"""
Generate a poisson spike train for a single neuron
using an inhomogenous poisson distribution.
.. image:: _images/inhomogenous_plot.png
:alt: Inhomogenous Poisson spike train
:width: 400px
:Example:
The spike train of the image above was generated by following function:
>>> poisson_inhomogenous((0.5, 0.25, 0, 0, 1, 0.5, 0, 0, 0.25, 0.5), 200)
:param lambdas: Lambda values
:type lambdas: List or Tuple
:param timesteps: total length of the spike train
:type timesteps: Int
"""
if timesteps % len(lambdas) != 0:
raise ValueError("Cannot divide the %d mu's on the %d timesteps equally." % (len(lambdas), timesteps))
    spiketrain = np.zeros((1, timesteps), dtype=bool)
    # fill each equal-length segment with spikes drawn at its own rate
    segment = timesteps // len(lambdas)
    for i, lam in enumerate(lambdas):
        spiketrain[0, i*segment:(i+1)*segment] = np.random.poisson(lam=lam, size=segment).astype(bool)
    return spiketrain
import numpy as np
from scipy.sparse import csr_matrix
import myLOBPCG_new
###########################################################################
#
# Parameters and file path
#
###########################################################################
TJfile='../../TJdata/triples_30000.dat'
prefix='../../output/' # directory contains rowA.binary, colA.binary, valA.binary; and will save outputs
numTJ=30000 # number of triple junctions
lamb=1000 # hyperparameter for the strength of the regularization
sym='Cubic' # Cubic or Hex, it changes the gbdat file header
fn= prefix+'Cub.gbdat' # the name of output gbdat file
###########################################################################
#
# Define util functions
#
###########################################################################
def read_dat(datFile, numTJ):
"""
    Input: triples.dat, written by the Fortran program Torq_gen
size=[numTJ*8,]
In each group, the data is [TJ directon, EA1, GB1, EA2, GB2, EA3, GB3]
Output: TJs, direction of the triple junctions
size = [numTJ, 3]
EAs, the EA angles of the 3 grains at a TJ
size = [numTJ, 3, 3]
norms, normal direction of the 3 GB at a TJ
size = [numTJ, 3, 3]
"""
with open(datFile) as f:
tmp = [line.split() for line in f if line.strip()]
TJs = np.zeros((numTJ, 3))
EAs = np.zeros((numTJ, 3, 3))
norms = np.zeros((numTJ, 3, 3))
for i in range(numTJ):
TJs[i,:] = np.array(tmp[i*8 + 1]).astype(float)
EAs[i,0, :] = np.array(tmp[i*8 + 2]).astype(float)
norms[i,0, :] = np.array(tmp[i*8 + 3]).astype(float)
EAs[i, 1, :] = np.array(tmp[i*8 + 4]).astype(float)
norms[i, 1, :] = np.array(tmp[i*8 + 5]).astype(float)
EAs[i, 2, :] = np.array(tmp[i*8 + 6]).astype(float)
norms[i, 2, :] = np.array(tmp[i*8 + 7]).astype(float)
return (TJs, EAs, norms)
def EulerZXZ2Mat(e):
"""
Active Euler Angle (radian) in ZXZ convention to active rotation matrix, which means newV=M*oldV
"""
x=e[0]
y=e[1]
z=e[2]
s1=np.sin(x)
s2=np.sin(y)
s3=np.sin(z)
c1=np.cos(x)
c2=np.cos(y)
c3=np.cos(z)
m=np.array([[c1*c3-c2*s1*s3,-c1*s3-c3*c2*s1,s1*s2],
[s1*c3+c2*c1*s3,c1*c2*c3-s1*s3,-c1*s2],
[s3*s2,s2*c3,c2]])
return m
def EAtoG(EA):
"""
Input: a set of Euler Angle
size=[3,]
Output: the corresponding orientation matrix g
size = [3, 3]
"""
g = np.zeros((3,3))
EA = np.radians(EA)
g=EulerZXZ2Mat(EA).T
return g
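# Sanity check (illustrative angles): an orientation matrix must be orthogonal
# with determinant +1; EAtoG should satisfy both for any Euler angle triplet.
def _check_EAtoG(EA=(30.0, 45.0, 60.0)):
    g = EAtoG(np.array(EA))
    assert np.allclose(g.dot(g.T), np.eye(3))
    assert np.isclose(np.linalg.det(g), 1.0)
    return g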
###########################################################################
#
# Construct and solve the minimization problem to get GB energy
#
###########################################################################
(TJs, EAs, norms) = read_dat(TJfile, numTJ)
Norm=np.empty((numTJ*3,3))
for i in range(numTJ):
Norm[3*i]=EAtoG(EAs[i,1]).dot(norms[i,0])
Norm[3*i+1]=EAtoG(EAs[i,2]).dot(norms[i,1])
Norm[3*i+2]=EAtoG(EAs[i,0]).dot(norms[i,2])
for j in range(len(Norm)):
    Norm[j]=Norm[j]/(np.linalg.norm(Norm[j]))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import struct
import unittest
import os
from hypothesis import given, example
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
np.set_printoptions(precision=6)
class TestFloatToFusedRandRowwiseQuantized(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=2, max_dim=2,
min_value=1, max_value=17), # only matrix is supported
bitwidth_=st.sampled_from([1, 2, 4, 8]),
random_=st.booleans(),
**hu.gcs)
@example(X=np.array([[0., 0., 0., 0.264019]]).astype(np.float32),
bitwidth_=2,
random_=False,
**hu.gcs)
@unittest.skip("Test is flaky, see https://github.com/pytorch/pytorch/issues/28550")
def test_rand_quantization(self, X, bitwidth_, random_, gc, dc):
# python reference of encoder
def quantization_ref(X):
in_shape = X.shape
data_per_byte = 8 // bitwidth_
output_cols = 10 + in_shape[1] // data_per_byte
tail = 0
if in_shape[1] % data_per_byte:
output_cols += 1
tail = data_per_byte - in_shape[1] % data_per_byte
segment = output_cols - 10
            out = np.zeros((in_shape[0], output_cols), dtype=np.uint8)
""" Functions for fetching USarray waveforms. """
from __future__ import print_function, division, absolute_import
import os
import sys
import datetime
import argparse
import copy
import time
# Check version
if sys.version_info.major < 3:
import urllib as urllibrary
else:
import urllib.request as urllibrary
import urllib
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal
import obspy
from wmpl.Utils.Earth import greatCircleDistance
from wmpl.Utils.OSTools import mkdirP
from wmpl.Utils.PlotMap import GroundMap
from wmpl.Utils.Math import subsampleAverage
import pyximport
pyximport.install(setup_args={'include_dirs':[np.get_include()]})
from supra.Fireballs.SeismicTrajectory import timeOfArrival, waveReleasePoint, waveReleasePointWinds, Constants
from supra.Utils.Classes import Position
from supra.Supracenter.cyscan5 import cyscan
from supra.Atmosphere.Parse import parseWeather
DATA_FILE = 'data.txt'
C = ['r', 'g', 'm', 'k', 'y']
# Read input config files
try:
    # Python 2
    import ConfigParser as configparser
except ImportError:
    # Python 3
    import configparser
def butterworthBandpassFilter(lowcut, highcut, fs, order=5):
""" Butterworth bandpass filter.
    Arguments:
lowcut: [float] Lower bandpass frequency (Hz).
highcut: [float] Upper bandpass frequency (Hz).
fs: [float] Sampling rate (Hz).
Keyword arguments:
order: [int] Butterworth filter order.
Return:
(b, a): [tuple] Butterworth filter.
"""
# Calculate the Nyquist frequency
nyq = 0.5*fs
low = lowcut/nyq
high = highcut/nyq
# Init the filter
b, a = scipy.signal.butter(order, [low, high], btype='bandpass')
return b, a
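# Usage sketch (synthetic waveform, illustrative cut-offs): zero-phase band-pass
# filtering with scipy.signal.filtfilt using the coefficients returned above;
# the 1 Hz component is kept, the 20 Hz component is suppressed.
def _demo_bandpass(fs=100.0):
    t = np.arange(0.0, 10.0, 1.0 / fs)
    waveform = np.sin(2 * np.pi * 1.0 * t) + 0.5 * np.sin(2 * np.pi * 20.0 * t)
    b, a = butterworthBandpassFilter(0.5, 5.0, fs, order=4)
    return scipy.signal.filtfilt(b, a, waveform)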
def convolutionDifferenceFilter(waveform_data):
""" Apply the convolution filter on data as suggested in Kalenda et al. (2014). """
# Apply the filter
filtered_data = np.convolve(waveform_data, [-0.5, 1.0, -0.5], mode='same')
# Detrend data
filtered_data = filtered_data - np.mean(filtered_data)
return filtered_data
def plotStationMap(dir_path, data_list, lat_centre, lon_centre, setup, sounding, ax=None, isc_data=None):
""" Plots the map of siesmic stations from loaded data file. """
fig = plt.figure(figsize=plt.figaspect(0.5))
fig.set_size_inches(20.9, 11.7)
if ax is None:
ax = plt.gca()
# Find unique networks
# networks = [entry[0] for entry in data_list]
# stat = [entry[1] for entry in data_list]
# net_isc = []
# lats=[]
# lons=[]
# Extra stations
if isc_data is not None:
all_stns = data_list + isc_data
# Remove duplicates
# k = sorted(isc_data)
# isc_data = [k[i] for i in range(len(k)) if i == 0 or k[i] != k[i-1]]
# for line in isc_data:
# # Only use stations within 5 degrees of lat and lon
# if abs(line[2] - lat_centre) < 5 and abs(line[3] - lon_centre) < 5:
# lats.append(np.radians(line[2]))
# lons.append(np.radians(line[3]))
# net_isc.append(line[5])
# # Extract the list of station locations
# lat_list = [np.radians(entry[2]) for entry in data_list]
# lon_list = [np.radians(entry[3]) for entry in data_list]
if len(all_stns) == 0:
print("ERROR: No stations to plot!")
exit()
lats = []
lons = []
for i in range(len(all_stns)):
lats.append(all_stns[i].position.lat_r)
lons.append(all_stns[i].position.lon_r)
# Plot stations and extra stations
m = GroundMap(lats, lons, ax=ax, color_scheme='light')
# Plot different networks with different colours
for stn in all_stns:
# # Extract the list of station locations
# lat_net_list = [np.radians(entry[2]) for entry in data_list]
# lon_net_list = [np.radians(entry[3]) for entry in data_list]
m.scatter(stn.position.lat_r, stn.position.lon_r, s=2, label=stn.network)
# for i in range(len(lat_net_list)):
x, y = m.m(stn.position.lon, stn.position.lat)
plt.text(x, y, stn.network + '-' + stn.code, horizontalalignment='left', verticalalignment='top', color='k', fontsize=8)
# if stat[i] in setup.rm_stat:
# pass
# # print('Excluding station: {:}'.format(networks[i] + '-' + stat[i]))
# else:
# if stat[i] in setup.high_f:
# m.scatter(lat_net_list[i], lon_net_list[i], s=25, c='g')
# elif stat[i] in setup.high_b:
# m.scatter(lat_net_list[i], lon_net_list[i], s=25, c='b')
# # if len(lats) != 0:
# for i in range(len(net_isc)):
# x, y = m.m(np.degrees(lons[i]), np.degrees(lats[i]))
# plt.text(x, y, net_isc[i], horizontalalignment='left', verticalalignment='top', color='k', fontsize=8)
lx, ly = m.m(lon_centre, lat_centre)
# # All extra stations added
# if isc_data is not None:
# for i in range(len(net_isc)):
# # Convert coordinates to map coordinates
# x, y = m.m(np.degrees(lons[i]), np.degrees(lats[i]))
# # Plot extra stations
# m.scatter(lats[i], lons[i], marker='^', c='k', s=1, )
# # Plot the text
# #plt.text(x, y, net_isc[i], horizontalalignment='left', verticalalignment='top', color='k', fontsize=8)
# data_list.append(isc_data[i])
# Plot source location
m.scatter([np.radians(lat_centre)], [np.radians(lon_centre)], marker='*', c='yellow', edgecolor='k', \
linewidth=0.1, label='Source')
# Plot the trajectory or fragmentation point if given
if setup.show_fragmentation_waveform or setup.show_ballistic_waveform:
if setup.show_fragmentation_waveform:
for i, line in enumerate(setup.fragmentation_point):
# Fragmentation plot
m.scatter([np.radians(float(line[0]))], [np.radians(float(line[1]))], c=C[(i+1)%4], marker='x')
# Extract coordinates of the reference station
        ref_pos = Position(lat_centre, lon_centre, 0)
# # Calculate the coordinates of the trajectory intersection with the ground
# lat_i, lon_i, elev_i = local2LatLon(float(np.radians(lat0)), float(np.radians(lon0)), float(0), \
# np.array([float(setup.lat_f), float(setup.lon_f), 0]))
# Calculate the coordinate of the beginning of the trajectory
# lat_beg, lon_beg = np.radians(float(np.degrees(setup.lat_i)) - np.cos(np.radians(setup.azim))), \
# np.radians(float(np.degrees(setup.lon_i)) - np.sin(np.radians(setup.azim)))
if setup.show_ballistic_waveform:
# Plot intersection with the ground
m.scatter(setup.traj_f.lat_r, setup.traj_f.lon_r, s=10, marker='x', c='b')
# Plot the trajectory
m.plot([setup.traj_i.lat_r, setup.traj_f.lat_r], [setup.traj_i.lon_r, setup.traj_f.lon_r], c='b')
# Get the limits of the plot
# (approximately a box around the deg_radius)
x_min = setup.traj_f.lon - 100000*setup.deg_radius
x_max = setup.traj_f.lon + 100000*setup.deg_radius
y_min = setup.traj_f.lat - 100000*setup.deg_radius
y_max = setup.traj_f.lat + 100000*setup.deg_radius
# Grid size of the contour plot
img_dim = setup.contour_res
x_data = np.linspace(x_min, x_max, img_dim)
y_data = np.linspace(y_min, y_max, img_dim)
xx, yy = np.meshgrid(x_data, y_data)
# # Make an array of all plane coordinates
plane_coordinates = np.c_[xx.ravel(), yy.ravel(), np.zeros_like(xx.ravel())]
times_of_arrival = np.zeros_like(xx.ravel())
# print('Creating contour plot...')
            # Calculate times of arrival for each point on the reference plane
az = np.radians(setup.azim)
ze = np.radians(setup.zangle)
# vector of the fireball trajectory
traj_vect = np.array([np.sin(az)*np.sin(ze), np.cos(az)*np.sin(ze), -np.cos(ze)])
#traj_vect = np.array([np.cos(az)*np.cos(ze), np.sin(az)*np.cos(ze), -np.sin(ze)])
for i, plane_coords in enumerate(plane_coordinates):
# Print out percent done
if (i + 1) % 10 == 0:
sys.stdout.write("\rDrawing Contour: {:.2f} %".format(100*(i + 1)/img_dim**2))
sys.stdout.flush()
time.sleep(0.001)
setup.traj_f.pos_loc(ref_pos)
# Point on the trajectory where the plane coordinate arrival came from
try:
p = waveReleasePointWinds(plane_coords, setup.traj_f.x, setup.traj_f.y, setup.t0, 1000*setup.v, np.radians(setup.azim), \
np.radians(setup.zangle), setup, sounding, [ref_pos.lat_r, ref_pos.lon_r, ref_pos.elev])
except:
p = waveReleasePoint(plane_coords, setup.traj_f.x, setup.traj_f.y, setup.t0, 1000*setup.v, np.radians(setup.azim), \
np.radians(setup.zangle), setup.v_sound)
# Coordinate transformation (rotate 90 deg CCW)
# p[0], p[1] = -p[1], p[0]
# # vector between the wave release point and the plane coordinate
# d_vect = plane_coords - p
# # Since the arrivals are always perpendicular to the fireball trajectory, only take arrivals where the dot product
# # of the vectors are small. This may not hold true for weather?
# #print(np.dot(d_vect, traj_vect))
# if np.dot(d_vect, traj_vect) < setup.dot_tol:
ti = timeOfArrival(plane_coords, setup.traj_f.x, setup.traj_f.y, setup.t0, 1000*setup.v, np.radians(setup.azim), \
np.radians(setup.zangle), setup, sounding=sounding, ref_loc=[ref_pos.lat_r, ref_pos.lon_r, ref_pos.elev], travel=True, fast=True)
#escape value for when there is no arrival
# else:
# ti = np.nan
times_of_arrival[i] = ti + setup.t0
# if there is no arrival, set to the maximum value on the contour
max_time = np.nanmax(times_of_arrival)
for i in range(len(times_of_arrival)):
if np.isnan(times_of_arrival[i]):
times_of_arrival[i] = max_time
times_of_arrival = times_of_arrival.reshape(img_dim, img_dim)
            # Determine the range and number of contour levels (from 0 up to the maximum arrival time)
toa_abs_max = np.max([np.abs(np.min(times_of_arrival)), np.max(times_of_arrival)])
# toa_abs_min = np.min([np.abs(np.min(times_of_arrival)), np.max(times_of_arrival)])
levels = np.linspace(0, toa_abs_max, 25)
            ### Convert contour local coordinates to geo coordinates
lat_cont = []
lon_cont = []
for x_cont, y_cont in zip(xx.ravel(), yy.ravel()):
lat_c, lon_c, _ = loc2Geo(ref_pos.lat, ref_pos.lon, ref_pos.elev, np.array([x_cont, y_cont, 0]))
lat_cont.append(lat_c)
lon_cont.append(lon_c)
            lat_cont = np.array(lat_cont)
            lon_cont = np.array(lon_cont)
import numpy as np
import cv2
from matplotlib import pyplot as plt
from library import *
import time
from keras import layers
from keras.models import Model
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto(allow_soft_placement=True)  # optionally: device_count = {'CPU' : 1, 'GPU' : 1}
from models import *
from keras.models import load_model
config.gpu_options.per_process_gpu_memory_fraction = 0.1
set_session(tf.Session(config=config))
# VGG like network
vgg_input_shape = tuple([60,60]) + tuple([1])
MODEL_NAME = 'AID_simCos_BigDesc_dropout'
weights2load = 'model-data/model.'+MODEL_NAME+'_75.hdf5'
train_model, sim_type = create_model(vgg_input_shape, None, model_name = MODEL_NAME, Norm=None, resume = True, ResumeFile = weights2load)
import subprocess
import math
def IMAScaller(img1,img2, desc = 11, MatchingThres = 0, knn_num = 2, Rooted = True, GFilter=2, Visual=False):
cv2.imwrite("/tmp/img1.png",img1)
cv2.imwrite("/tmp/img2.png",img2)
_ = subprocess.check_output("cp acc-test/z_main /tmp", shell=True)
_ = subprocess.check_output('cd /tmp && ./z_main -im1 "./img1.png" -im2 "./img2.png" -desc %d -applyfilter %d > imas.out'%(desc,GFilter), shell=True)
# imasout = subprocess.check_output('cd /tmp && cat imas.out', shell=True).decode('utf-8')
# print(imasout)
ET_KP = float(subprocess.check_output('cd /tmp && cat imas.out | grep "IMAS-Detector accomplished in" | cut -f 4 -d" " ', shell=True).decode('utf-8'))
ET_M = float(subprocess.check_output('cd /tmp && cat imas.out | grep "IMAS-Matcher accomplished in" | cut -f 4 -d" " ', shell=True).decode('utf-8'))
KPs1 = int(subprocess.check_output('cd /tmp && cat imas.out | grep "image 1" | cut -f 7 -d" " ', shell=True).decode('utf-8'))
KPs2 = int(subprocess.check_output('cd /tmp && cat imas.out | grep "image 2" | cut -f 7 -d" " ', shell=True).decode('utf-8'))
simus = int(subprocess.check_output('cd /tmp && cat imas.out | grep "image 1" | cut -f 14 -d" " ', shell=True).decode('utf-8'))
Total = int(subprocess.check_output('cd /tmp && cat imas.out | grep "possible matches have been found" | cut -f 4 -d" " ', shell=True).decode('utf-8'))
Filtered = int(subprocess.check_output('cd /tmp && cat imas.out | grep "Final number of matches" | cut -f 10 -d" " ', shell=True).decode('utf-8')[:-2])
return Total, Filtered, ET_KP, ET_M, KPs1, KPs2, simus
import sklearn.preprocessing
def RootSIFT(img1,img2, MatchingThres = 0, knn_num = 2, Rooted = True, GFilter=1, Visual=False):
start_time = time.time()
KPlist1, sift_des1 = ComputeSIFTKeypoints(img1, Desc = True)
KPlist2, sift_des2 = ComputeSIFTKeypoints(img2, Desc = True)
Identity = np.float32([[1, 0, 0], [0, 1, 0]])
h, w = img1.shape[:2]
KPlist1, sift_des1, temp = Filter_Affine_In_Rect(KPlist1,Identity,[0,0],[w,h], desc_list = sift_des1)
h, w = img2.shape[:2]
KPlist2, sift_des2, temp = Filter_Affine_In_Rect(KPlist2,Identity,[0,0],[w,h], desc_list = sift_des2)
if Rooted:
sift_des1 = np.sqrt(sklearn.preprocessing.normalize(sift_des1, norm='l2',axis=1))
sift_des2 = np.sqrt(sklearn.preprocessing.normalize(sift_des2, norm='l2',axis=1))
ET_KP = time.time() - start_time
bf = cv2.BFMatcher()
start_time = time.time()
sift_matches = bf.knnMatch(sift_des1,sift_des2, k=knn_num)
ET_M = time.time() - start_time
# Apply ratio test
lda = CPPbridge('./build/libDA.so')
sift_all = []
if knn_num==2:
for m,n in sift_matches:
if m.distance < MatchingThres*n.distance:
sift_all.append(m)
elif knn_num==1:
for m in sift_matches:
if m[0].distance <= MatchingThres:
sift_all.append(m[0])
sift_all = OnlyUniqueMatches(sift_all,KPlist1,KPlist2,SpatialThres=5)
sift_consensus = []
if GFilter>0 and len(sift_all)>10:
sift_src_pts = np.float32([ KPlist1[m.queryIdx].pt for m in sift_all ]).ravel()
sift_dst_pts = np.float32([ KPlist2[m.trainIdx].pt for m in sift_all ]).ravel()
matchesMask_sift, H_sift = lda.GeometricFilter(sift_src_pts, img1, sift_dst_pts, img2, Filer='ORSA_H')
for i in range(0,len(matchesMask_sift)):
if matchesMask_sift[i]==True:
sift_consensus.append(sift_all[i])
if Visual:
img4 = cv2.drawMatches(img1,KPlist1,img2,KPlist2,sift_all, None,flags=2)
cv2.imwrite('./temp/SIFTmatches.png',img4)
img4 = cv2.drawMatches(img1,KPlist1,img2,KPlist2,sift_consensus, None,flags=2)
cv2.imwrite('./temp/SIFT_homography_matches.png',img4)
return sift_all, sift_consensus, ET_KP, ET_M
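# Example usage (illustrative; the file paths are placeholders):
# img1 = cv2.imread('query.png', cv2.IMREAD_GRAYSCALE)
# img2 = cv2.imread('target.png', cv2.IMREAD_GRAYSCALE)
# all_matches, inliers, t_detect, t_match = RootSIFT(
#     img1, img2, MatchingThres=0.8, knn_num=2, GFilter=1)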
def siftAID(img1,img2, MatchingThres = math.inf, Simi='SignProx', knn_num = 1, GFilter=1, Visual=False, safe_sim_thres_pos = 0.8, safe_sim_thres_neg = 0.2, GetAllMatches=False):
if Simi=='CosProx':
FastCode = 0
elif Simi=='SignProx':
FastCode = 1
else:
print('Wrong similarity choice for AI-SIFT !!!')
exit()
# find the keypoints with SIFT
start_time = time.time()
KPlist1, sift_des1 = ComputeSIFTKeypoints(img1, Desc = True)
KPlist2, sift_des2 = ComputeSIFTKeypoints(img2, Desc = True)
    Identity = np.float32([[1, 0, 0], [0, 1, 0]])
"""Implementation of vpype's data model
"""
import logging
import math
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union, cast
import numpy as np
from shapely.geometry import LinearRing, LineString, MultiLineString
from .geometry import crop, reloop
from .line_index import LineIndex
# REMINDER: anything added here must be added to docs/api.rst
__all__ = [
"LineCollection",
"Document",
"LineLike",
"LineCollectionLike",
"as_vector",
# deprecated:
"VectorData",
]
LineLike = Union[LineString, LinearRing, Iterable[complex]]
# We accept LineString and LinearRing as line collection because MultiLineString are regularly
# converted to LineString/LinearRing when operation reduce them to single-line construct.
LineCollectionLike = Union[
Iterable[LineLike], MultiLineString, "LineCollection", LineString, LinearRing
]
def as_vector(a: np.ndarray):
"""Return a view of a complex line array that behaves as an Nx2 real array"""
return a.view(dtype=float).reshape(len(a), 2)
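# Quick illustration (follows from numpy's view semantics): the returned Nx2
# real array shares memory with the complex input, so no copy is made and
# in-place edits propagate both ways.
# >>> as_vector(np.array([1. + 2.j, 3. + 4.j]))
# array([[1., 2.],
#        [3., 4.]])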
# noinspection PyShadowingNames
class LineCollection:
"""
:py:class:`LineCollection` encapsulate a list of piecewise linear lines (or paths). Lines
are implemented as 1D numpy arrays of complex numbers whose real and imaginary parts
represent the X, respectively Y, coordinates of point in the paths.
An instance of :py:class:`LineCollection` is used to model a single layer in vpype's
:ref:`pipeline <fundamentals_pipeline>`. The complete pipeline is modelled by a
:py:class:`Document` instance, which essentially is a mapping of ``int`` (layer ID) to
:py:class:`LineCollection`.
Although the actual ``list`` is stored as private data member in :py:class:`LineCollection`
instances, the class provides a sequence API similar to ``list``::
>>> import vpype, numpy as np
>>> lc = vpype.LineCollection()
>>> lc.append(np.array([0, 10. + 10.j]))
>>> lc.append(np.array([10.j, 5. + 5.j]))
>>> len(lc)
2
>>> lc[0]
array([ 0. +0.j, 10.+10.j])
>>> for line in lc:
... print(repr(line))
...
array([ 0. +0.j, 10.+10.j])
array([0.+10.j, 5. +5.j])
In addition to Numpy arrays, the class accepts paths expressed in a variety of format
including Python ``list`` or Shapely objects::
>>> from shapely.geometry import LineString, LinearRing, MultiLineString
>>> lc = vpype.LineCollection()
>>> lc.append([5, 5+5j])
>>> lc.append(LineString([(1, 1), (3, 2)]))
>>> lc.append(LinearRing([(0, 0), (1, 0), (1, 1), (0, 1)]))
>>> lc.extend(MultiLineString([[(0, 0), (10, 0)], [(4, 4), (0, 4)]]))
>>> lc
LineCollection([array([5.+0.j, 5.+5.j]), array([1.+1.j, 3.+2.j]), array([0.+0.j,
1.+0.j, 1.+1.j, 0.+1.j, 0.+0.j]), array([ 0.+0.j, 10.+0.j]), array([4.+4.j, 0.+4.j])])
Instances can also be converted to Shapely's MultiLineString:
>>> mls = lc.as_mls()
>>> print(mls)
MULTILINESTRING ((5 0, 5 5), (1 1, 3 2), (0 0, 1 0, 1 1, 0 1, 0 0), (0 0, 10 0),
(4 4, 0 4))
Finally, :py:class:`LineCollection` implements a number of operations such as geometrical
transformation, cropping, merging, etc. (see member function documentation for details).
"""
def __init__(self, lines: LineCollectionLike = ()):
"""Create a LineCollection instance from an iterable of lines.
Args:
lines (LineCollectionLike): iterable of line (accepts the same input as
:func:`~LineCollection.append`).
"""
self._lines: List[np.ndarray] = []
self.extend(lines)
@property
def lines(self) -> List[np.ndarray]:
"""Returns the list of line.
Returns:
list of line
"""
return self._lines
def append(self, line: LineLike) -> None:
"""Append a single line.
This function accepts an iterable of complex or a Shapely geometry
(:py:class:`LineString` or :py:class:`LinearRing`).
Args:
line (LineLike): line to append
"""
if isinstance(line, LineString) or isinstance(line, LinearRing):
# noinspection PyTypeChecker
self._lines.append(np.array(line).view(dtype=complex).reshape(-1))
else:
line = np.array(line, dtype=complex).reshape(-1)
if len(line) > 1:
self._lines.append(line)
def extend(self, lines: LineCollectionLike) -> None:
"""Append lines from a collection.
This function accepts an iterable of iterable of complex, another
:py:class:`LineCollection` instance, or a Shapely geometry
(:py:class:`MultiLineString`, :py:class:`LineString` or :py:class:`LinearRing`).
Shapely's LineString and LinearRing are occasionally obtained when a MultiLineString is
actually expected. As a result, they are accepted as input even though they are not,
strictly speaking, a line collection.
Args:
lines (LineCollectionLike): lines to append
"""
if hasattr(lines, "geom_type") and lines.is_empty: # type: ignore
return
# sometimes, mls end up actually being ls
if isinstance(lines, LineString) or isinstance(lines, LinearRing):
lines = [lines]
for line in lines:
self.append(line)
def is_empty(self) -> bool:
"""Check for emptiness.
Returns:
True if the instance does not contain any line, False otherwise.
"""
return len(self) == 0
def reverse(self) -> None:
"""Reverse order of the lines."""
self._lines = list(reversed(self._lines))
def __iter__(self):
return self._lines.__iter__()
def __len__(self) -> int:
return len(self._lines)
def __getitem__(self, item: Union[int, slice]):
return self._lines[item]
def __repr__(self):
return f"LineCollection({self._lines})"
def as_mls(self) -> MultiLineString:
"""Converts the LineCollection to a :py:class:`MultiLineString`.
Returns:
a MultiLineString Shapely object
"""
return MultiLineString([as_vector(line) for line in self.lines])
def translate(self, dx: float, dy: float) -> None:
"""Translates all line by a given offset.
Args:
dx: offset along X axis
dy: offset along Y axis
"""
c = complex(dx, dy)
for line in self._lines:
line += c
def scale(self, sx: float, sy: Optional[float] = None) -> None:
"""Scale the geometry.
The scaling is performed about the coordinates origin (0, 0). To scale around a
specific location, appropriate translations must be performed before and after the
scaling::
>>> import vpype
>>> lc = vpype.LineCollection([(-1+1j, 1+1j)])
>>> lc.translate(0, -1)
>>> lc.scale(1.2)
>>> lc.translate(0, 1)
>>> lc
LineCollection([array([-1.2+1.j, 1.2+1.j])])
Args:
sx: scale factor along x
sy: scale factor along y (if None, then sx is used)
"""
if sy is None:
sy = sx
for line in self._lines:
line.real *= sx
line.imag *= sy
def rotate(self, angle: float) -> None:
"""Rotates the geometry by ``angle`` amount.
The angle is expressed in radian. Positive value rotate clockwise.
The rotation is performed about the coordinates origin (0, 0). To rotate around a
specific location, appropriate translations must be performed before and after the
scaling::
>>> import vpype
>>> lc = vpype.LineCollection([(-1+1j, 1+1j)])
>>> lc.translate(0, -1)
>>> lc.rotate(1.2)
>>> lc.translate(0, 1)
Args:
angle: rotation angle in rad
"""
c = complex(math.cos(angle), math.sin(angle))
for line in self._lines:
line *= c
def skew(self, ax: float, ay: float) -> None:
"""Skew the geometry by some angular amounts along X and Y axes.
The angle is expressed in radians.
The skew is performed about the coordinates origin (0, 0). To rotate around a
specific location, appropriate translations must be performed before and after the
scaling::
>>> import vpype
>>> lc = vpype.LineCollection([(-1+1j, 1+1j)])
>>> lc.translate(0, -1)
>>> lc.skew(0., 1.2)
>>> lc.translate(0, 1)
Args:
ax: skew angle in rad along X axis
ay: skew angle in rad along Y axis
"""
tx, ty = math.tan(ax), math.tan(ay)
for line in self._lines:
line += tx * line.imag + 1j * ty * line.real
def reloop(self, tolerance: float) -> None:
"""Randomizes the seam of closed paths. Paths are considered closed when their first
and last point are closer than *tolerance*.
:param tolerance: tolerance to determine if a path is closed
"""
for i, line in enumerate(self._lines):
delta = line[-1] - line[0]
            if np.hypot(delta.real, delta.imag) <= tolerance:
                self._lines[i] = reloop(line)
import matplotlib.pyplot as plt
import numpy as np
import pathlib
import pandas as pd
import random
import seaborn as sns
import sys
import warnings
from numba import jit
from numpy import linalg as la
from scipy.special import loggamma
from scipy.stats import chi2
from scipy.linalg import toeplitz, solve
from sklearn.preprocessing import scale
from timeit import default_timer as dt
from .tmath import cov2cor
from .utils import pretty_print_time
from .distributions import qnorm_approx
def _updateProgBarMCMC(curIter, totalIter, t0, ar, barLength=20):
"""Custom progress bar to output MCMC chain progress.
Parameters
----------
curIter : int
Current iteration.
totalIter : int
Total iterations.
t0 : float
Timestamp of when the process started (timestamp as float).
ar : float
Acceptance Ratio.
barLength : int, optional
The character length of the progress bar. The default is 20.
Returns
-------
None.
"""
status = "Working..."
progress = float(curIter)/float(totalIter)
if isinstance(progress, int):
progress = float(progress)
if progress >= 1:
progress = 1
status = "Finished!..."
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1:.2f}% iter: {2}/{3}, {4} Elapsed: {5}, Est: {6}, Accept. Rate: {7}".format(
"#"*block + "-"*(barLength - block),
round(progress*100.0, 2), curIter, totalIter, status, pretty_print_time(t0, dt()),
pretty_print_time((dt()-t0)/curIter * (totalIter - curIter)), np.round(ar, 3))
if progress >= 1:
sys.stdout.write(text + "\r\n")
sys.stdout.flush()
else:
sys.stdout.write(text)
sys.stdout.flush()
def applyMCMC(st, ni, lp, algo, algoOpts=None, postArgs={},
sd=0.02, max_tries=100):
"""This function iteratively applies the MCMC initialization. Since the MCMC
algorithms used here involve a cholesky decomposition, the methods
sometimes get stuck with a covaraince matrix that is not positive definite.
This will attempt to jitter the covariance matrix until it can initialize
properly.
Parameters
----------
st : numpy array
An array of the parameter starting values.
ni : int
        Number of MCMC iterations.
lp : function
Function for the log posterior.
algo : function
MCMC algorithm to be performed.
algoOpts : dict, optional
Specific options for the MCMC algorithm. The default is None.
postArgs : dict, optional
        Specific options for the posterior function. The default is an empty dictionary.
    sd : float, optional
        The standard deviation of the normal distribution used to draw the
        jitter amount from. In other words, the jittered starting values are
        the original values plus a random draw X, where X~N(0, sd).
        The default is 0.02.
    max_tries : int, optional
        The max number of times to try and jitter before admitting defeat.
        If the jitter fails, the reason for the covariance matrix not being
        positive definite may not be due to randomness, and may require
        a re-evaluation of the problem space. The default is 100.
Raises
------
ValueError
Raised when cholesky decomposition fails after max_tries.
Returns
-------
    res : dict
        Dictionary containing the MCMC samples ('parameters') and the final
        state of the chain ('prev_vals').
"""
try_num = 1
not_successful = True
res = None
lns = st.shape
while not_successful:
if try_num % 5 == 0:
st = st + np.random.normal(size=lns, scale=sd)
try:
res = algo(start=st, niter=ni, lpost=lp, postArgs=postArgs,
options=algoOpts)
not_successful = False
except np.linalg.LinAlgError:
try_num += 1
if try_num >= max_tries:
raise ValueError("Cholesky Decomposition was not successful after " + str(max_tries) + " tries. Try new starting values")
print("Number of Cholesky tries: " + str(try_num))
return res
# For upper triangle rank one update
@jit
def cholupdate(L, x, update=True):
"""Upper triangle, rank one update for cholesky decomposed matrix.
Parameters
----------
L : numpy array (float)
The upper-triangular decomposed matrix, shape=(N, N).
x : numpy array (float)
The values being added to L, shape=(N, ).
update : bool, optional
Perform an update (as opposed to a downdate). The default is True.
Returns
-------
L : numpy array
Return updated L matrix.
"""
p = len(x)
for k in range(p):
if update:
r = np.sqrt((L[k, k]**2) + (x[k]**2))
else:
r = np.sqrt((L[k, k]**2) - (x[k]**2))
c = r / L[k, k]
s = x[k] / L[k, k]
L[k, k] = r
if k < (p - 1):
if update:
L[k, (k + 1):p] = (L[k, (k + 1):p] + s * x[(k + 1):p]) / c
else:
L[k, (k + 1):p] = (L[k, (k + 1):p] - s * x[(k + 1):p]) / c
x[(k + 1):p] = c * x[(k + 1):p] - s * L[k, (k + 1):p]
return L
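# Quick numerical check (illustrative): an upper-triangular factor updated by
# cholupdate should match the factor of the rank-one-updated matrix. Note the
# function mutates L and x in place, so pass copies.
# >>> A = np.array([[4., 2.], [2., 3.]])
# >>> x = np.array([1., 1.])
# >>> U = np.linalg.cholesky(A).T.copy()
# >>> U2 = cholupdate(U, x.copy())
# >>> np.allclose(U2.T @ U2, A + np.outer(x, x))
# True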
def adaptive_mcmc(start, niter, lpost, postArgs=None, options=None):
"""A random walk metropolis algorithm that adaptively tunes the covariance
matrix.
Based on methods by Rosenthal (who improved on Haario\'s method).
The method by Rosenthal is sometimes referred to as Adaptive Mixture
Metropolis, while the algorithm by Haario is called Adaptive Metropolis and
is generally considered to be the historically first adaptive Metropolis
algorithm.
Parameters
----------
start : numpy array
Starting values for the MCMC.
niter : int
Number of iterations.
lpost : function
Log posterior function.
    postArgs : dict
        Extra arguments for the log posterior function. The default is :obj:`None` .
    options : :obj:`dict`, optional
        Extra arguments for the MCMC algorithm. Parameter options include
        - beta \: (:obj:`float`) Between 0 and 1. Decides the proportion to sample for each section of the mixture \
        distribution. A mixture distribution is essentially like adding two distributions together. However, to \
        avoid some complicated math, one way to sample from a mixture of two distributions is to use a trick, \
        namely, to first sample from a uniform distribution between 0, 1, and then evaluate whether that value \
        is above some threshold (beta in this case). If it is, sample from the first distribution, otherwise, \
        sample from the second.
        - progress \: (:obj:`bool`) Whether to display progress bar.
        - prev_vals \: (:obj:`dict`) The previous values of the last run, namely:
            - chol2 \: (:obj:`numpy array`) The decomposed covariance matrix of the parameters.
            - sumx \: (:obj:`numpy array`) The current sum of the parameter values (for each parameter).
            - prev_i \: (:obj:`int` or :obj:`float`) The number of samples represented in sumx. Used in averaging \
            sumx.
Returns
-------
parm : numpy array
MCMC samples.
prev_vals : dict
The ending values of the MCMC algorithm. Useful when you want to
continue where you left off.
Notes
-----
A random walk metropolis algorithm that adaptively tunes the covariance matrix. Based on methods by Rosenthal
    (who improved on Haario's method). The method by Rosenthal is sometimes referred to as Adaptive Mixture Metropolis,
while the algorithm by Haario is called Adaptive Metropolis and is generally considered to be the historically first
adaptive Metropolis algorithm.
This method is identical to a Random Walk Metropolis algorithm, except that it adapts the covariance matrix after
each iteration based on the sample covariance of the entire chain up to the current iteration. The candidate values
in the MCMC chain are then sampled from a mixed density distribution defined as follows:
.. math:: Q_{n}(x, \\cdot) = (1 - \\beta)N(x, (2.38)^{2} \\Sigma_{n}/d) + \\beta N(x, (0.1)^{2} I_{d} / d)
* :math:`Q_{n}`: The proposal distribution
* :math:`N`: Normal Distribution
* :math:`\\beta`: some small constant, usually 0.05
* :math:`\\Sigma_{n}`: sample covariance up to the :math:`n^{th}` iteration
* :math:`d`: the number of parameters
* :math:`I_{d}`: the :math:`d \\times d` identity matrix
To sample from any density distribution, you must first calculate the inverse cdf of the function, otherwise
known as the quantile function. Assuming that there is a simple method to generate a uniform random number with
range :math:`[0, 1]` (most programming languages do), then calculating a random number from any distribution is
simple. First, generate the random uniform number, and use it as the input to the quantile function. However,
in many practical situations (such as with the normal distribution), calculating the quantile function is
non-trivial. The same is true for the above mixed density distribution. Therefore, to simplify sampling from
the mixed density distribution, use a uniform random number generator to sample a number :math:`U` between
    :math:`[0, 1]`. Then, if :math:`U < \\beta`, sample the candidate value from :math:`N(x, (0.1)^{2} I_{d} / d)`.
    Otherwise, sample from :math:`N(x, (2.38)^{2} \\Sigma_{n}/d)`, matching the component weights in the mixture above.
To calculate :math:`\\Sigma_{n}` efficiently, the algorithm uses the :func:`cholupdate` function. Let :math:`X` be
an :math:`m \\times n` matrix of MCMC samples. Then, the equation for population covariance can be defined as follows:
.. math:: \\Sigma_n = E(XX^T) - E(X)E(X^T) = \\frac{XX^{T}}{n} - \\mu \\mu^{T}
The algorithm calculates the cholesky distribution of :math:`XX^{T}` for the first four samples. From that point
on, :math:`XX^{T}` is updated using :func:`cholupdate` where `update=True`. If :math:`U < \\beta`, then
:math:`\\big( \\frac{XX^{T}}{n} - \\mu \\mu^{T} \\big)` is calculated using :func:`cholupdate(XXT/n, uuT, update=False)<cholupdate>`.
This algorithm uses the sample covariance, which can be calculated using the following trick:
.. math:: \\Big( \\frac{XX^{T}}{n} - \\mu \\mu^{T} \\Big) \\Big(\\frac{n}{n-1} \\Big)
The algorithm multiplies :math:`\\Sigma_{n}` by :math:`\\frac{2.38^2}{d}`, which makes the final covariance used
to create the candidate samples:
.. math:: \\Sigma_{n} = \\Big( \\frac{2.38^2}{d} \\Big) \\Big( \\frac{XX^{T}}{n} - \\mu \\mu^{T} \\Big) \\Big(\\frac{n}{n-1} \\Big)
"""
beta = 0.05
progress = True
prev_vals = {'chol2': None, 'sumx': 0.0, 'prev_i': 0.0}
if postArgs is None:
postArgs = {}
if options is not None:
keys = list(options.keys())
if 'beta' in keys:
beta = options['beta']
if 'progress' in keys:
progress = options['progress']
if 'prev_vals' in keys:
prev_vals.update(options['prev_vals'])
numParams = start.size
sqrtNumParams = np.sqrt(numParams)
parm = np.zeros(shape=(niter, numParams))
parm[0, ] = start
sumx = start + prev_vals['sumx']
accept = 0
post_old = lpost(start, **postArgs)
prop_dist_var = (0.1**2) * np.diag(np.repeat(1, numParams)) / numParams
chol1 = la.cholesky(prop_dist_var)
chol2 = prev_vals['chol2']
acceptDraw = False
loop = range(1, niter)
sumi = 1.0 + prev_vals['prev_i']
t0 = dt()
for i in loop:
parm[i, ] = parm[i - 1, ]
if i <= ((2 * numParams) - 1):
tune = chol1
else:
if chol2 is None:
XXt = parm[0:i, ].T.dot(parm[0:i, ])
chol2 = la.cholesky(XXt).T
else:
chol2 = cholupdate(chol2, np.array(parm[i - 1, ]))
if random.random() < beta:
tune = chol1
else:
tune = (2.38*cholupdate(chol2 / np.sqrt(sumi), sumx/sumi, update=False) / sqrtNumParams * np.sqrt(sumi / (sumi - 1)))
if np.any(np.isnan(tune)):
tune = chol1
cand = np.random.normal(size=numParams).dot(tune) + parm[i - 1, ]
post_new = lpost(cand, **postArgs)
if (post_new - post_old) > np.log(random.random()):
acceptDraw = True
if acceptDraw:
parm[i, ] = cand
post_old = post_new
accept += 1
sumx = sumx + parm[i, ]
sumi += 1.0
acceptDraw = False
if progress:
_updateProgBarMCMC(i + 1, niter, t0, float(accept) / float(i))
prev_vals = {'chol2': chol2, 'prev_i': sumi - 1, 'sumx': sumx}
print("Acceptance Rate: ", float(accept) / float(niter))
return {'parameters': parm, 'prev_vals': prev_vals}
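# Example usage (illustrative): sample a 2-D standard normal. The toy log
# posterior below is an assumption for demonstration, not part of this module;
# see applyMCMC above for a retry wrapper around this call.
# def log_post(theta):
#     return -0.5 * np.sum(theta**2)
# out = adaptive_mcmc(start=np.zeros(2), niter=5000, lpost=log_post,
#                     options={'progress': False})
# samples = out['parameters']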
def rwm_with_lap(start, niter, lpost, postArgs={}, options=None):
"""A random walk metropolis algorithm that adaptively tunes the covariance
matrix with a log-adaptive posterior.
See "Exploring an Adaptive Metropolis Algorithm" by <NAME>, 2010.
Parameters
----------
start : numpy array
Starting values for the MCMC.
niter : int
Number of iterations.
lpost : function
Log posterior function.
postArgs : dict
Extra arguments for the log posterior function. The default is
an empty dictionary.
options : dict, optional
Extra arguments for the MCMC algorithm, namely:
k : int
The number of MCMC samples to generate for each evaluation.
c0 : float
Attenuation parameter. Default is 1.
c1 : float
Attenuation parameter. Default is 0.8.
progress : bool
Whether to display progress bar
prev_vals : dict
The previous values of the last run, namely:
E_0 : numpy array
the final covariance matrix
sigma_2 : float
the positive scaling parameter in the algorithm
t : int
the current iteration number
The default is None.
Returns
-------
parm : numpy array
MCMC samples.
prev_vals : dict
The ending values of the MCMC algorithm. Useful when you want to continue where you left off.
"""
k = 20
c_0 = 1.0
c_1 = 0.8
progress = True
prev_vals = {'E_0': None, 'sigma_2': None, 't': 0.0}
if options is not None:
keys = list(options.keys())
if 'k' in keys:
k = options['k']
if 'c_0' in keys:
c_0 = options['c_0']
if 'c_1' in keys:
c_1 = options['c_1']
if 'progress' in keys:
progress = options['progress']
if 'prev_vals' in keys:
prev_vals.update(options['prev_vals'])
numParams = start.size
optimal = 0.444
if numParams >= 2:
optimal = 0.234
T_iter = np.ceil(niter/float(k))
niter = int(T_iter * k)
parm = np.zeros(shape=(niter, numParams))
parm[0, ] = start
total_accept = k_accept = 0
post_old = lpost(start, **postArgs)
sigma_2 = (2.38**2)/numParams
if prev_vals['sigma_2'] is not None:
sigma_2 = prev_vals["sigma_2"]
E_0 = np.diag(np.repeat(1, numParams))
if prev_vals['E_0'] is not None:
E_0 = prev_vals["E_0"]
chol = la.cholesky(np.sqrt(sigma_2)*E_0)
chol_i = np.array(chol)
t = 1 + prev_vals['t']
acceptDraw = False
loop = range(1, niter)
t0 = dt()
for i in loop:
parm[i, ] = parm[i - 1, ]
cand = np.random.normal(size=numParams).dot(chol) + parm[i - 1, ]
post_new = lpost(cand, **postArgs)
if (post_new - post_old) > np.log(random.random()):
acceptDraw = True
if acceptDraw:
parm[i, ] = cand
post_old = post_new
k_accept += 1
total_accept += 1
acceptDraw = False
if progress:
_updateProgBarMCMC(i + 1, niter, t0, float(total_accept) / float(i))
if (i + 1) % k == 0:
X = parm[(i + 1 - k):(i + 1), :]
mean_X = np.mean(X, axis=0)
r_t = k_accept / float(k)
Ehat_0 = (1.0 / (k - 1.0)) * ((X - mean_X).T.dot((X - mean_X)))
gamma_1 = 1/(t**c_1)
gamma_2 = c_0 * gamma_1
sigma_2 = np.exp(np.log(sigma_2) + (gamma_2 * (r_t - optimal)))
E_0 = E_0 + gamma_1*(Ehat_0 - E_0)
if np.any(np.isnan(E_0)) or not np.all(np.isfinite(E_0)):
chol = chol_i
else:
try:
chol = la.cholesky(np.sqrt(sigma_2)*E_0)
#except la.LinAlgError:
# chol = sla.sqrtm(sigma_2*E_0)
except:
chol = chol_i
t += 1
k_accept = 0
prev_vals = {'E_0': E_0, 'sigma_2': sigma_2, 't': t}
print("Acceptance Rate: ", float(total_accept) / float(niter))
return {'parameters': parm, 'prev_vals': prev_vals}
def rwm(start, niter, lpost, postArgs={}, options=None):
"""A random walk metropolis algorithm.
Parameters
----------
start : numpy array
Starting values for the MCMC.
niter : int
Number of iterations.
lpost : function
Log posterior function.
postArgs : dict
Extra arguments for the log posterior function. The default is
an empty dictionary.
options : dict, optional
Extra arguments for the MCMC algorithm, namely:
E : numpy array
The covariance matrix
progress : bool
Whether to display progress bar
prev_vals : dict
The previous values of the last run, namely:
E_0 : numpy array
                the final covariance matrix
The default is None.
Returns
-------
parm : numpy array
MCMC samples.
prev_vals : dict
The ending values of the MCMC algorithm. Useful when you want to
continue where you left off.
"""
numParams = start.size
prev_vals = {'E_0': ((2.38**2)/numParams)*np.diag(np.repeat(1, numParams))}
progress = True
if options is not None:
keys = list(options.keys())
        if 'E' in keys:
            prev_vals['E_0'] = options['E']
if 'progress' in keys:
progress = options['progress']
if 'prev_vals' in keys:
prev_vals.update(options['prev_vals'])
parm = np.zeros(shape=(niter, numParams))
parm[0, ] = start
accept = 0
post_old = lpost(start, **postArgs)
E = prev_vals['E_0']
chol = la.cholesky(E)
acceptDraw = False
loop = range(1, niter)
t0 = dt()
for i in loop:
parm[i, ] = parm[i - 1, ]
cand = np.random.normal(size=numParams).dot(chol) + parm[i - 1, ]
post_new = lpost(cand, **postArgs)
if (post_new - post_old) > np.log(random.random()):
acceptDraw = True
if acceptDraw:
parm[i, ] = cand
post_old = post_new
accept += 1
acceptDraw = False
if progress:
_updateProgBarMCMC(i + 1, niter, t0, float(accept) / float(i))
    prev_vals = {'E_0': E}
print("Acceptance Rate: ", float(accept) / float(niter))
return {'parameters': parm, 'prev_vals': prev_vals}
def samp_size_calc_raftery(q=0.025, r=0.005, s=0.95):
"""Calculate needed sample size for Raftery evaluation.
Parameters
----------
q : float, optional
Quantile of interest (in terms of percentile, i.e. between 0 and 1).
The default is 0.025.
r : float, optional
Accuracy. The default is 0.005.
s : float, optional
Probability. The default is 0.95.
Returns
-------
phi : float
Phi parameter in Raftery evaluation.
nmin : int
Minimum number of samples needed.
"""
phi = qnorm_approx(0.5 * (1.0 + s))
nmin = np.ceil((q * (1.0 - q) * phi**2)/r**2).astype(int)
return phi, nmin
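# Example (illustrative): the defaults q=0.025, r=0.005, s=0.95 give
# phi = qnorm(0.975) ~ 1.96 and
# nmin = ceil(0.025 * 0.975 * 1.96**2 / 0.005**2) = 3746,
# the classic Raftery-Lewis minimum pilot length.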
def lag(s, b, method):
"""Translated from R's mcmcse package
Returns the lag window value for the corresponding window.
Parameters
----------
s : int
Current block number.
b : int
Number of blocks.
method : str
Either `bartlett` or None.
Returns
-------
int, float
Lag window.
"""
if method == "bartlett":
return 1 - (s/b)
else:
return (1 + np.cos(np.pi * s/b))/2
def adjust_matrix(mat, N, epsilon=None, b=9/10):
"""Translated from R's mcmcse package.
Function adjusts a non-positive definite estimator to be positive definite.
Parameters
----------
mat : numpy array
        A symmetric pxp matrix, usually a covariance matrix.
    N : int
        Number of observations in the original matrix.
epsilon : float, optional
The adjustment size. If None, sqrt(log(N)/p).
The default is None.
b : float, optional
The exponent on N for the adjustment. The default is 9/10.
Returns
-------
mat_adj : numpy array
Adjusted matrix.
"""
if epsilon is None:
epsilon = np.sqrt(np.log(N)/mat.shape[1])
mat_adj = mat
adj = epsilon*N**(-b)
var = np.diag(mat)
corr = cov2cor(mat)
eig_val, eig_vec = np.linalg.eig(corr)
adj_eigs = np.maximum(eig_val, adj)
    mat_adj = np.diag(var**0.5).dot(eig_vec).dot(np.diag(adj_eigs)).dot(eig_vec.T).dot(np.diag(var**0.5))
    return mat_adj
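# Example (illustrative): repair an indefinite correlation-like estimate.
# N=1000 observations is an assumed sample size.
# M = np.array([[1.0, 0.999, 0.0], [0.999, 1.0, 0.999], [0.0, 0.999, 1.0]])
# M_adj = adjust_matrix(M, N=1000)
# np.all(np.linalg.eigvalsh(M_adj) > 0)  # True: small eigenvalues clipped up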
from numpy import zeros, ones, dot, sum, abs, max, argmax, clip, \
random, prod, asarray, set_printoptions, unravel_index
# Helpers for generating zero, normal, uniform, and integer test arrays.
def zero(*shape): return zeros(shape)
def randnorm(*shape): return random.normal(size=shape)
def randuni(*shape): return random.random(size=shape)
def randint(*shape, min=-3, max=9):
    data = asarray(random.randint(min+1, max+1, size=shape))
    return data
import sqlite3
import numpy as np
import logging
import matplotlib.pyplot as plt
from scipy.integrate import simps
from scipy.interpolate import griddata
from .config import *
import os
import datetime
from .star import Star
logger = logging.getLogger(__name__)
class SolvePars:
    def __init__(self, key_parameter_known='logg&plx',
                 db='yy02.sql3', feh_offset = 0,
                 nsigma=5, window_len_age=13):
        self.key_parameter_known = key_parameter_known
        self.get_isochrone_points_db = db
        self.feh_offset = feh_offset
        self.get_isochrone_points_nsigma = nsigma
        self.smooth_window_len_age = window_len_age
        self.smooth_window_len_mass = 0
        self.smooth_window_len_logl = 0
        self.smooth_window_len_mv = 0
        self.smooth_window_len_r = 0
        self.smooth_window_len_logg = 0
        self.bayesian = False
class PlotPars:
def __init__(self, figure_format='png', directory="", make_figures=True):
self.age_xlim = [0, 14]
self.mass_xlim = None
self.logl_xlim = None
self.mv_xlim = None
self.r_xlim = None
self.logg_xlim = None
self.directory = directory
self.figure_format = figure_format
self.title_inside = None
self.make_figures = make_figures
self.make_age_plot = False
self.make_nearest_plot = False
def pdf(pdf_x, ips, prob, par, smooth_window_len):
'''Calculates a probability distribution function (PDF) for parameter par
given the x-values for the PDF, the isochrone points ips, and their
probability. Return PDF and smoothed PDF (using smooth_window_len) if
possible (otherwise returns two non-smoothed PDFs), as well as a stats
dictionary with mean, std, most probable value, etc.
'''
dx = 0.5*(pdf_x[1] - pdf_x[0])
pdf_y = []
for x in pdf_x:
pdf_y.append(sum(prob[np.logical_and(ips[par] >= x-dx,
ips[par] < x+dx)]))
pdf_y = np.array(pdf_y)
pdf_y = pdf_y/simps(pdf_y, pdf_x)
try:
pdf_y_smooth = smooth(pdf_y, smooth_window_len)
#pdf_y_smooth = savitzky_golay(pdf_y, smooth_window_len, 2)
pdf_y_smooth = pdf_y_smooth/simps(pdf_y_smooth, pdf_x)
except:
pdf_y_smooth = pdf_y
logger.warning('Unable to smooth '+par+' PDF.')
stats = get_stats(pdf_x, pdf_y_smooth)
if stats['most_probable'] is not None:
print("{0:10s} {1:6.3f} | {2:6.3f} - {3:6.3f} | "\
"{4:6.3f} - {5:6.3f} | {6:6.3f} +/- {7:6.3f}"\
.format(par,
stats['most_probable'],
stats['lower_limit_1sigma'],
stats['upper_limit_1sigma'],
stats['lower_limit_2sigma'],
stats['upper_limit_2sigma'],
stats['mean'], stats['std']))
else:
print("{0:10s} | - | "\
" - | {1:6.3f} +/- {2:6.3f}"\
.format(par, stats['mean'], stats['std']))
logger.warning("--- Unable to calculate PDF stats for "+par)
return pdf_y, pdf_y_smooth, stats
def get_stats(pdf_x, pdf_y_smooth):
stats = {}
stats['most_probable'] = \
np.mean(np.array(pdf_x)[pdf_y_smooth == max(pdf_y_smooth)])
stats['mean'] = simps(pdf_y_smooth*pdf_x, pdf_x)
stats['std'] = np.sqrt(simps(pdf_y_smooth*(pdf_x-stats['mean'])**2,\
pdf_x))
k = pdf_x <= stats['most_probable']
pdf_y_left = 0.5*pdf_y_smooth[k]/simps(pdf_y_smooth[k], pdf_x[k])
pdf_x_left = pdf_x[k]
areas_left = []
for x in pdf_x_left:
areas_left.append(simps(pdf_y_left[pdf_x_left <= x],
pdf_x_left[pdf_x_left <= x]))
areas_left = np.array(areas_left)
if np.mean(areas_left) == 0:
logger.warning("Left side of distribution is empty")
stats['most_probable'] = None
stats['lower_limit_1sigma'] = None
stats['lower_limit_2sigma'] = None
stats['upper_limit_1sigma'] = None
stats['upper_limit_2sigma'] = None
return stats
k = pdf_x >= stats['most_probable']
pdf_y_right = 0.5*pdf_y_smooth[k]/simps(pdf_y_smooth[k], pdf_x[k])
pdf_x_right = pdf_x[k]
areas_right = []
for x in pdf_x_right:
areas_right.append(simps(pdf_y_right[pdf_x_right <= x],
pdf_x_right[pdf_x_right <= x]))
areas_right = np.array(areas_right)
try:
stats['lower_limit_1sigma'] = \
np.mean(griddata(areas_left, pdf_x_left, 0.158))
stats['lower_limit_2sigma'] = \
np.mean(griddata(areas_left, pdf_x_left, 0.022))
stats['upper_limit_1sigma'] = \
np.mean(griddata(areas_right, pdf_x_right, 0.341))
stats['upper_limit_2sigma'] = \
np.mean(griddata(areas_right, pdf_x_right, 0.477))
except:
stats['lower_limit_1sigma'] = -9.999
stats['lower_limit_2sigma'] = -9.999
stats['upper_limit_1sigma'] = -9.999
stats['upper_limit_2sigma'] = -9.999
return stats
def solve_one(Star, SolvePars, PlotPars=PlotPars(), isochrone_points=None):
'''Calculates most likely parameters of Star using isochrone points
'''
if hasattr(Star, 'feh_model'):
Star.old_feh = Star.feh
Star.feh = getattr(Star, 'feh_model')
if SolvePars.key_parameter_known == 'plx':
Star.get_absolute_magnitude()
if SolvePars.key_parameter_known == 'logg&plx':
Star.get_absolute_magnitude()
if not isochrone_points:
ips = get_isochrone_points(Star, SolvePars.feh_offset,
SolvePars.get_isochrone_points_db,
SolvePars.get_isochrone_points_nsigma,
SolvePars.key_parameter_known)
else:
ips = isochrone_points
    if ips is None:
logger.warning('Could not get any isochrone points.')
return None
print('Using {0} isochrone points\n'.format(len(ips['age'])))
print('Parameter m.p. | 1-sigma range | 2-sigma range | mean +/- stdev')
print('---------- ------ | --------------- | --------------- | -----------------')
logger.info('Using {0} Y2 isochrone points'.format(len(ips['age'])))
Star.isokeyparameterknown = SolvePars.key_parameter_known
Star.isonpoints = len(ips['age'])
ips['t'] = 10**ips['logt']
ips['r'] = 10**(0.5*(np.log10(ips['mass'])-ips['logg']+4.437))
if SolvePars.key_parameter_known == 'logg':
prob = np.exp(-1*((ips['t']-Star.teff)/ \
(1.414214*Star.err_teff))**2)* \
np.exp(-1*((ips['logg']-Star.logg)/ \
(1.414214*Star.err_logg))**2)* \
np.exp(-1*((ips['feh']-Star.feh)/ \
(1.414214*Star.err_feh))**2)
if SolvePars.key_parameter_known == 'plx':
prob = np.exp(-1*((ips['t']-Star.teff)/ \
(1.414214*Star.err_teff))**2)* \
np.exp(-1*((ips['mv']-Star.M_V)/ \
(1.414214*Star.err_M_V))**2)* \
np.exp(-1*((ips['feh']-Star.feh)/ \
(1.414214*Star.err_feh))**2)
if SolvePars.key_parameter_known == 'logg&plx':
prob = np.exp(-1*((ips['t']-Star.teff)/ \
(1.414214*Star.err_teff))**2)* \
np.exp(-1*((ips['logg']-Star.logg)/ \
(1.414214*Star.err_logg))**2)* \
np.exp(-1*((ips['mv']-Star.M_V)/ \
(1.414214*Star.err_M_V))**2)* \
np.exp(-1*((ips['feh']-Star.feh)/ \
(1.414214*Star.err_feh))**2)
if SolvePars.key_parameter_known == 'rho':
ips['rho'] = ips['mass']/(ips['r']**3)
prob = np.exp(-1*((ips['t']-Star.teff)/ \
(1.414214*Star.err_teff))**2)* \
np.exp(-1*((ips['rho']-Star.rho)/ \
(1.414214*Star.err_rho))**2)* \
np.exp(-1*((ips['feh']-Star.feh)/ \
(1.414214*Star.err_feh))**2)
if SolvePars.bayesian:
prob *= ips['mass']**(-2.7)
#age
ages = 0.1+np.arange(200)*0.1
pdf_age_x = ages[np.logical_and(ages >= min(ips['age'])-0.2,
ages <= max(ips['age'])+0.2)]
pdf_age_y, pdf_age_y_smooth, Star.isoage = \
pdf(pdf_age_x, ips, prob, 'age', SolvePars.smooth_window_len_age)
Star.pdf_age = {'x': pdf_age_x, 'y': pdf_age_y, 'ys': pdf_age_y_smooth}
#mass
    masses = 0.2 + np.arange(311)*0.01  # assumed 0.01 Msun grid step (0.2-3.3 Msun), by analogy with the age grid
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
import skeleton as skel
import skeleton_matching as skm
import robust_functions as rf
import helperfunctions as hf
import visualize as vis
Obs = namedtuple('Observation', 'g q')
def register_skeleton(S1, S2, corres, params):
"""
This function computes the (non-rigid) registration params between the
two skeletons. This function computes the normal equation, solves the
non-linear least squares problem.
Parameters
----------
S1, S2 : Skeleton Class
Two skeletons for which we compute the non-rigid registration params
corres : numpy array (Mx2)
correspondence between two skeleton nodes
params : Dictionary
num_iter : Maximum number of iterations for the optimization routine
default: 10
w_rot : Weight for the rotation matrix constraints
default: 100,
w_reg : Weight for regularization constraints
default: 100
w_corresp: Weight for correspondence constraints
default: 1
w_fix : Weight for fixed nodes
default: 1
fix_idx : list of fixed nodes
default : []
R_fix : list of rotation matrices for fixed nodes
default : [np.eye(3)]
t_fix : list of translation vectors for fixed nodes
default: [np.zeros((3,1))]
use_robust_kernel : Use robust kernels for optimization (recommended if corres has outliers)
default : True
robust_kernel_type : Choose a robust kernel type (huber, cauchy, geman-mcclure)
default: 'cauchy'
robust_kernel_param : scale/outlier parameter for robust kernel
default: 2
debug : show debug visualizations + info
default: False
Returns
-------
T12 : list of 4x4 numpy arrays
Affine transformation corresponding to each node in S1
"""
print('Computing registration params.')
# set default params if not provided
if 'num_iter' not in params:
params['num_iter'] = 10
if 'w_rot' not in params:
params['w_rot'] = 100
if 'w_reg' not in params:
params['w_reg'] = 100
if 'w_corresp' not in params:
params['w_corresp'] = 1
if 'w_fix' not in params:
params['w_fix'] = 1
    if 'fix_idx' not in params:
params['fix_idx'] = []
if 'use_robust_kernel' not in params:
params['use_robust_kernel'] = False
if 'robust_kernel_type' not in params:
params['robust_kernel_type'] = 'cauchy'
if 'robust_kernel_param' not in params:
params['robust_kernel_param'] = 2
if 'debug' not in params:
params['debug'] = False
# initialize normal equation
J_rot, r_rot, J_reg, r_reg, J_corresp, r_corresp, J_fix, r_fix = \
initialize_normal_equations(S1, corres, params)
# initialze solution
x = initialize_solution(S1, params)
# initialize weights
W_rot, W_reg, W_corresp, W_fix = initialize_weight_matrices(\
params['w_rot'], len(r_rot), params['w_reg'], len(r_reg), \
params['w_corresp'], len(r_corresp) , params['w_fix'], len(r_fix))
# # initialize variables in optimization
m = S1.XYZ.shape[0]
T12 = [None]*m
R = [None]*m
t = [None]*m
for j in range(m):
xj = x[12*j:12*(j+1)]
R[j] = np.reshape(xj[0:9], (3,3))
t[j] = xj[9:12]
# perform optimization
if params['debug']:
fh_debug = plt.figure()
E_prev = np.inf;
dx_prev = np.inf;
for i in range(params['num_iter']):
# counters used for different constraints
jk = 0
jc = 0
jf = 0
# compute jacobian and residual for each constraint types
for j in range(m):
# registration params for jth node
Rj = R[j]
tj = t[j]
# constraints from rotation matrix entries
Jj_rot, rj_rot = compute_rotation_matrix_constraints(Rj)
J_rot[6*j:6*(j+1), 12*j:12*(j+1)] = Jj_rot
r_rot[6*j:6*(j+1)] = rj_rot
# constraints from regularization term
ind = np.argwhere(S1.A[j,:]==1).flatten()
for k in range(np.sum(S1.A[j,:])):
# params
Rk = R[ind[k]]
tk = t[ind[k]]
Jj_reg, Jk_reg, r_jk_reg = compute_regularization_constraints(Rj, tj, Rk, tk)
# collect all constraints
nc = r_jk_reg.shape[0]
J_reg[nc*jk : nc*(jk+1), 12*j:12*(j+1)] = Jj_reg
J_reg[nc*jk : nc*(jk+1), ind[k]*12:12*(ind[k]+1)] = Jk_reg
r_reg[nc*jk : nc*(jk+1)] = r_jk_reg
# increment counter for contraints from neighbouring nodes
jk = jk+1
# constraints from correspondences
if corres.shape[0] > 0:
ind_C = np.argwhere(corres[:,0] == j).flatten()
if len(ind_C) > 0:
# observations
Y = Obs(S1.XYZ[j,:].reshape(3,1), S2.XYZ[corres[ind_C,1],:].reshape(3,1))
# compute constraints
J_jc_corresp, r_jc_corresp = compute_corresp_constraints(Rj, tj, Y)
# collect all constraints
nc = r_jc_corresp.shape[0]
J_corresp[nc*jc:nc*(jc+1), 12*j:12*(j+1)] = J_jc_corresp
r_corresp[nc*jc:nc*(jc+1)] = r_jc_corresp
# increment counter for correspondence constraints
jc = jc + 1
# constraints from fixed nodes
if len(params['fix_idx']) > 0:
if j in params['fix_idx']:
ind_f = params['fix_idx'].index(j)
# observations
R_fix = params['R_fix'][ind_f]
t_fix = params['t_fix'][ind_f]
# compute fix node constraints
J_jf_fix, r_jf_fix = compute_fix_node_constraints(Rj, tj, R_fix, t_fix);
nc = r_jf_fix.shape[0]
J_fix[nc*jf: nc*(jf+1), 12*j:12*(j+1)] = J_jf_fix
r_fix[nc*jf:nc*(jf+1)] = r_jf_fix
# update counter
jf = jf + 1
# compute weights and residual using robust kernel
if params['use_robust_kernel']:
if params['robust_kernel_type'] == 'huber':
_, _, W_corresp = rf.loss_huber(r_corresp, params['robust_kernel_param'])
elif params['robust_kernel_type'] == 'cauchy':
_, _, W_corresp = rf.loss_cauchy(r_corresp, params['robust_kernel_param'])
elif params['robust_kernel_type'] == 'geman_mcclure':
_, _, W_corresp = rf.loss_geman_mcclure(r_corresp, params['robust_kernel_param'])
else:
                print('Robust kernel undefined. \n')
W_corresp = params['w_corresp']*np.diag(W_corresp.flatten())
# collect all constraints
J = np.vstack((J_rot, J_reg, J_corresp, J_fix))
r = np.vstack((r_rot, r_reg, r_corresp, r_fix))
# construct weight matrix
W = combine_weight_matrices(W_rot, W_reg, W_corresp, W_fix)
# solve linear system
A = J.T @ W @ J
b = J.T @ W @ r
dx = -np.linalg.solve(A, b)
# Errors
E_rot = r_rot.T @ W_rot @ r_rot
E_reg = r_reg.T @ W_reg @ r_reg
E_corresp = r_corresp.T @ W_corresp @ r_corresp
E_fix = r_fix.T @ W_fix @ r_fix
E_total = E_rot + E_reg + E_corresp + E_fix
# print errors
if params['debug']:
print("Iteration # ", i)
print("E_total = ", E_total)
print("E_rot = ", E_rot)
print("E_reg = ", E_reg)
print("E_corresp = ", E_corresp)
print("E_fix = ", E_fix)
print("Rank(A) = ", np.linalg.matrix_rank(A))
# update current estimate
for j in range(m):
#params
dx_j = dx[12*j:12*(j+1)]
R[j] = R[j] + np.reshape(dx_j[0:9], (3, 3), order = 'F')
t[j] = t[j] + dx_j[9:12]
# collect and return transformation
for j in range(m):
T12[j] = hf.M(R[j], t[j])
# apply registration to skeleton for visualization
if params['debug']:
# compute registration error
S2_hat = apply_registration_params_to_skeleton(S1, T12)
vis.plot_skeleton(fh_debug, S1,'b');
vis.plot_skeleton(fh_debug, S2,'r');
vis.plot_skeleton(fh_debug, S2_hat,'k');
vis.plot_skeleton_correspondences(fh_debug, S2_hat, S2, corres)
plt.title("Iteration " + str(i))
# exit criteria
if np.abs(E_total - E_prev) < 1e-6 or np.abs(np.linalg.norm(dx) - np.linalg.norm(dx_prev)) < 1e-6:
print("Exiting optimization.")
print('Total error = ', E_total)
break
# update last solution
E_prev = E_total
dx_prev = dx
return T12
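# Example usage (illustrative): S1 and S2 are Skeleton instances and corres an
# Mx2 integer array, all assumed to come from the surrounding matching
# pipeline rather than constructed here.
# params = {'num_iter': 10, 'use_robust_kernel': True,
#           'fix_idx': [0], 'R_fix': [np.eye(3)], 't_fix': [np.zeros((3, 1))]}
# T12 = register_skeleton(S1, S2, corres, params)
# S2_hat = apply_registration_params_to_skeleton(S1, T12)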
def initialize_normal_equations(S, corres, params):
"""
    This function initializes the J and r matrices for the different types of constraints.
Parameters
----------
S : Skeleton Class
Contains points, adjacency matrix etc related to the skeleton graph.
corres : numpy array (Mx2)
correspondence between two skeleton nodes
params : Dictionary
see description in register_skeleton function
Returns
-------
J_rot : numpy array [6mx12m]
jacobian for rotation matrix error
r_rot : numpy array [6mx1]
residual for rotation matrix error
J_reg : numpy array [12mx12m]
jacobian for regularization error
r_reg : numpy array [12mx1]
residual for reuglarization error
J_corres : numpy array [3nCx12m]
jacobian for correspondence error
r_corres : numpy array [3nCx1]
residual for correspondence error
J_fix : numpy array[12nFx12m]
jacobian for fix nodes
r_fix : numpy array [12mFx1]
residual for fix nodes
"""
# get sizes from input
m = S.XYZ.shape[0]
nK = 2*S.edge_count
nC = corres.shape[0]
nF = len(params['fix_idx'])
# constraints from individual rotation matrix
num_rot_cons = 6*m
J_rot = np.zeros((num_rot_cons, 12*m))
r_rot = np.zeros((num_rot_cons,1))
# constraints from regularization
num_reg_cons = 12*nK
J_reg = np.zeros((num_reg_cons,12*m))
r_reg = np.zeros((num_reg_cons,1))
# constraints from correspondences
num_corres_cons = 3*nC;
J_corres = np.zeros((num_corres_cons,12*m))
r_corres = np.zeros((num_corres_cons,1))
# constraints from fix nodes
num_fix_cons = 12*nF
J_fix = np.zeros((num_fix_cons,12*m))
r_fix = np.zeros((num_fix_cons,1))
return J_rot, r_rot, J_reg, r_reg, J_corres, r_corres, J_fix, r_fix
def initialize_solution(S, params):
"""
    This function initializes the solution either as the zero solution or
    from a provided initial transformation.
Parameters
----------
S : Skeleton Class
Only used for getting number of unknowns.
params : Dictionary
        R_init and t_init are used for initializing the solution if
        they are provided. If R_init is a list, then a separate initial estimate is
        assumed for every node. Otherwise R_init should be 3x3 and t_init 3x1.
Returns
-------
x : numpy array [12mx1]
initial solution vector as expected by the optimization procedure.
"""
m = S.XYZ.shape[0]
x = np.zeros((12*m,1))
R = [None]*m
t = [None]*m
for j in range(m):
if 'R_init' in params and 't_init' in params:
if len(params['R_init']) == m:
R[j] = params['R_init'][j]
t[j] = params['t_init'][j]
else:
R[j] = params['R_init']
t[j] = params['t_init']
else:
# start from zero solution
R[j] = np.eye(3);
t[j] = np.zeros((3,1))
# rearrange in a column vector
x[12*j:12*(j+1)] = np.vstack((np.reshape(R[j], (9, 1),order='F'),t[j]))
return x
def initialize_weight_matrices(w_rot, n_rot, w_reg, n_reg, w_corresp, n_corresp, w_fix, n_fix):
"""
This function computes the weight matrices corresponding to each constraint
given the weights and number of constraints for each type.
"""
W_rot = np.diag(w_rot*np.ones(n_rot))
W_reg = np.diag(w_reg*np.ones(n_reg))
W_corresp = np.diag(w_corresp*np.ones(n_corresp))
W_fix = np.diag(w_fix*np.ones(n_fix))
return W_rot, W_reg, W_corresp, W_fix
def combine_weight_matrices(W_rot, W_reg, W_corresp, W_fix):
"""
This function combines the weight matrices of each constraint type into
the combined weight matrix used in the optimization step.
"""
# get number of constraints
n_rot = W_rot.shape[0]
n_reg = W_reg.shape[0]
n_corresp = W_corresp.shape[0]
n_fix = W_fix.shape[0]
nC = n_rot + n_reg + n_corresp + n_fix
# combine them to form the big W matrix
W = np.zeros((nC, nC))
W[0:n_rot, 0:n_rot] = W_rot
W[n_rot:n_rot+n_reg, n_rot:n_rot+n_reg] = W_reg
W[n_rot+n_reg:n_rot+n_reg+n_corresp, n_rot+n_reg:n_rot+n_reg+n_corresp] = W_corresp
W[n_rot+n_reg+n_corresp:n_rot+n_reg+n_corresp+n_fix, n_rot+n_reg+n_corresp:n_rot+n_reg+n_corresp+n_fix] = W_fix
return W
def compute_rotation_matrix_constraints(R):
# constraints from rotation matrix entries
c1 = R[0:3,0].reshape((3,1))
c2 = R[0:3,1].reshape((3,1))
c3 = R[0:3,2].reshape((3,1))
# # Jacobian wrt R (1x9), wrt t (1x3)
r1 = c1.T @ c2
Jc_r1 = np.hstack((c2.T, c1.T, np.zeros((1,3))))
Jt_r1 = np.zeros((1,3))
r2 = c1.T @ c3
Jc_r2 = np.hstack((c3.T, np.zeros((1,3)), c1.T))
Jt_r2 = np.zeros((1,3))
r3 = c2.T @ c3
Jc_r3 = np.hstack((np.zeros((1,3)), c3.T, c2.T))
Jt_r3 = np.zeros((1,3))
r4 = c1.T @ c1 -1
Jc_r4 = np.hstack((2*c1.T, np.zeros((1,3)), np.zeros((1,3))))
Jt_r4 = np.zeros((1,3))
r5 = c2.T @ c2 -1
Jc_r5 = np.hstack((np.zeros((1,3)), 2*c2.T, np.zeros((1,3))))
Jt_r5 = np.zeros((1,3))
r6 = c3.T @ c3 -1
Jc_r6 = np.hstack((np.zeros((1,3)), np.zeros((1,3)), 2*c3.T))
Jt_r6 = np.zeros((1,3))
# J:= 6x12, r:= 6x1
J = np.vstack((np.hstack((Jc_r1, Jt_r1)),
np.hstack((Jc_r2, Jt_r2)),
np.hstack((Jc_r3, Jt_r3)),
np.hstack((Jc_r4, Jt_r4)),
np.hstack((Jc_r5, Jt_r5)),
np.hstack((Jc_r6, Jt_r6))))
r = np.vstack((r1,
r2,
r3,
r4,
r5,
r6))
return J, r
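# Sanity check (sketch): for an exact rotation matrix all six orthonormality
# residuals vanish, so r should be numerically zero:
#
#   from scipy.spatial.transform import Rotation
#   R = Rotation.random().as_matrix()
#   J, r = compute_rotation_matrix_constraints(R)
#   assert np.allclose(r, 0, atol=1e-12)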
def compute_regularization_constraints(Rj, tj, Rk, tk):
# Transformations
Tj = hf.M(Rj, tj)
Tk = hf.M(Rk, tk)
# residual (12x1)
r_eye = Tj @ np.linalg.inv(Tk)
r = np.vstack((np.reshape(r_eye[0:3,0:3]- np.eye(3), (9, 1), order='F'), np.reshape(r_eye[0:3,3],(3,1), order='F')))
# jacobian
x1 = np.vstack((np.reshape(Rj, (9, 1)), tj))
x2 = np.vstack((np.reshape(Rk, (9, 1)), tk))
Jj = hf.jacobian(residual_reg, 1e-6, [0], x1, x2)
Jk = hf.jacobian(residual_reg, 1e-6, [1], x1, x2)
return Jj, Jk, r
def compute_corresp_constraints(R, t, Y):
# residual 3x1
r = R @ Y.g + t - Y.q
# JR=(3x9), Jt=(3x3)
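# With column-major vec(R) = [c1; c2; c3], R @ g = g0*c1 + g1*c2 + g2*c3,
# so d(r)/d(vec(R)) = [g0*I3, g1*I3, g2*I3], matching the blocks built below.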
JR = np.hstack((Y.g[0]*np.eye(3), Y.g[1]*np.eye(3), Y.g[2]*np.eye(3)))
Jt = np.eye(3)
J = np.hstack((JR, Jt))
return J, r
def compute_fix_node_constraints(R, t, R_fix, t_fix):
# error (12x1), Jc (12x9), Jt (12x3)
r = np.vstack((np.reshape(R, (9, 1), order='F'), t)) - np.vstack((np.reshape(R_fix, (9, 1), order='F'), t_fix))
# the residual is linear in (vec(R), t), so J = [Jc, Jt] is the 12x12 identity
J = np.eye(12)
return J, r
"""
Training step for the paper: four labels + Ak + C + N
"""
import numpy as np
import glob
import matplotlib.pyplot as plt
import sys
import pyfits
#sys.path.insert(0, '/home/annaho/aida41040/annaho/TheCannon/TheCannon')
#sys.path.insert(0, '/home/annaho/aida41040/annaho/TheCannon')
from TheCannon import dataset
from TheCannon import model
from TheCannon import lamost
from astropy.table import Table
from matplotlib.colors import LogNorm
from matplotlib import rc
rc('font', family='serif')
rc('text', usetex=True)
import os
GIT_DIR = "/Users/annaho/Dropbox/Research/TheCannon/"
DATA_DIR = GIT_DIR + "data/"
SPEC_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age/with_col_mask/xval_with_cuts"
#SPEC_DIR = "."
def load_data():
print("Loading all data")
DIR = DATA_DIR # DATA_DIR already includes GIT_DIR
a = pyfits.open("%s/labels_file_full.fits" %DIR)
tbl = a[1].data
a.close()
# Pull out all APOGEE DR12 values
# FPARAM: (teff, logg, rvel, mh, c, n, alpha)
teff_all = tbl['FPARAM'][:,0]
logg_all = tbl['FPARAM'][:,1]
mh_all = tbl['FPARAM'][:,3]
cm_all = tbl['FPARAM'][:,4]
nm_all = tbl['FPARAM'][:,5]
am_all = tbl['FPARAM'][:,6]
ak_all = tbl['AK_WISE']
# Discard objects with Teff > 4550 if -1 < [M/H] < -0.5
print("Discarding objects")
choose_teff = teff_all > 4550
choose_mh = np.logical_and(-1 < mh_all, mh_all < -0.5)
discard_teff = np.logical_and(choose_mh, choose_teff) # 743 objects
# Discard objects with [C/M] < -0.4 dex
discard_cm = cm_all < -0.4 # 40 objects
# metal-poor stars [M/H] < -0.1 have sketchy scaling relations
# but this shouldn't affect our spectral C and N
# in Marie's paper they don't have any low-metallicity stars,
# but it doesn't matter for the training anyway.
bad = np.logical_or(discard_teff, discard_cm)
choose = ~bad
ref_id = tbl['lamost_id'][choose]
ref_id = np.array([val.strip() for val in ref_id]).astype(str)
ref_label = np.vstack((
teff_all[choose], logg_all[choose], mh_all[choose],
cm_all[choose], nm_all[choose], am_all[choose],
ak_all[choose])).T
np.savez("./ref_id.npz", ref_id)
np.savez("./ref_label.npz", ref_label)
print("Getting spectra")
all_id = np.load("%s/tr_id.npz" %SPEC_DIR)['arr_0'].astype(str)
all_flux = np.load("%s/tr_flux.npz" %SPEC_DIR)['arr_0']
all_ivar = np.load("%s/tr_ivar.npz" %SPEC_DIR)['arr_0']
choose = np.array([np.where(all_id==f)[0][0] for f in ref_id])
flux = all_flux[choose,:]
ivar = all_ivar[choose,:]
np.savez("ref_flux.npz", flux)
np.savez("ref_ivar.npz", ivar)
def train():
wl = np.load("%s/../wl_cols.npz" %SPEC_DIR)['arr_0']
tr_id = np.load("%s/ref_id.npz" %SPEC_DIR)['arr_0']
tr_label = np.load("%s/ref_label.npz" %SPEC_DIR)['arr_0']
tr_label = tr_label[:,0:3]
tr_flux = np.load("%s/ref_flux.npz" %SPEC_DIR)['arr_0']
tr_ivar = np.load("%s/ref_ivar.npz" %SPEC_DIR)['arr_0']
ds = dataset.Dataset(
wl, tr_id, tr_flux, tr_ivar, tr_label,
tr_id, tr_flux, tr_ivar)
# teff, logg, mh, cm, nm, am, ak
ds.set_label_names(
['T_{eff}', '\log g', '[Fe/H]']) #, '[C/M]','[N/M]',
#'[\\alpha/M]', 'A_k'])
#ds.diagnostics_SNR()
#ds.diagnostics_ref_labels()
#np.savez("ref_snr.npz", ds.tr_SNR)
print("Training model")
nlab = ds.tr_label.shape[1]
print(nlab)
npix = len(ds.wl)
print(npix)
filt = np.ones((nlab, npix), dtype=bool)
print(filt)
#filt[nlab-1,0:500] = 0
m = model.CannonModel(2, wl_filter = filt)
m.fit(ds)
np.savez("./coeffs.npz", m.coeffs)
np.savez("./scatters.npz", m.scatters)
np.savez("./chisqs.npz", m.chisqs)
np.savez("./pivots.npz", m.pivots)
m.diagnostics_leading_coeffs(ds)
#m.diagnostics_leading_coeffs_triangle(ds)
#m.diagnostics_plot_chisq(ds)
def test_step_iteration(ds, m, starting_guess):
errs, chisq = m.infer_labels(ds, starting_guess)
return ds.test_label_vals, chisq, errs
def test_step():
#wl = np.load("%s/wl.npz" %SPEC_DIR)['arr_0']
wl = np.load("wl_cols.npz")['arr_0']
test_id = np.load("%s/ref_id.npz" %SPEC_DIR)['arr_0']
tr_label = np.load("%s/ref_label.npz" %SPEC_DIR)['arr_0']
test_flux = np.load("%s/ref_flux.npz" %SPEC_DIR)['arr_0']
test_ivar = np.load("%s/ref_ivar.npz" %SPEC_DIR)['arr_0']
#tr_id = np.load("./ref_id.npz")['arr_0']
#tr_flux = np.load("./ref_flux.npz")['arr_0']
#tr_ivar = np.load("./ref_ivar.npz")['arr_0']
#tr_label = np.load("./ref_label.npz")['arr_0']
ds = dataset.Dataset(
wl, test_id, test_flux, test_ivar, tr_label,
test_id, test_flux, test_ivar)
ds.set_label_names(
['T_{eff}', '\log g', '[Fe/H]', '[C/M]','[N/M]',
'[\\alpha/M]', 'A_k'])
m = model.CannonModel(2)
m.coeffs = np.load("./culled_coeffs.npz")['arr_0']
m.scatters = np.load("./culled_scatters.npz")['arr_0']
m.chisqs = np.load("./culled_chisqs.npz")['arr_0']
m.pivots = np.load("./culled_pivots.npz")['arr_0']
# -*- coding: utf-8 -*-
# Metropolis Drift-Diffusion-Model
# Copyright 2018 <NAME>, <NAME>, <NAME>
# This file is released under the MIT licence that accompanies the code
## This file contains the core computational routines
import numpy as np
import ddm as dm
from numbers import Real, Integral
## Some auxiliary functions
def uniform_proposal(n, b):
"""
choose a proposal uniformly from a set of `n` alternatives,
excluding the incumbent `b`.
"""
i = np.random.randint(n - 1)
return i + (i >= b)
def nonuniform_proposal(em, b):
"""
choose a proposal according to a symmetric stochastic matrix (the exploration matrix `em`);
the incumbent `b` is passed as a parameter and it is used to select the correct column
used for sampling.
"""
n = em.shape[0]
while True:
a = np.random.choice(np.arange(n), p = em[:,b])
if a != b:
return a
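# Example (sketch): with n = 3 alternatives and a uniform symmetric
# exploration matrix, each column is a probability vector and the proposal
# can never equal the incumbent `b`:
#
#   em = np.full((3, 3), 1.0 / 3.0)
#   a = nonuniform_proposal(em, b=0)   # returns 1 or 2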
def cname(x):
"class name, for pretty-printing purposes only"
return x.__class__.__name__
## DDM sampling
def ddm_sample(u_a, u_b, lbarrier, ubarrier):
"""
Sample a response time and a choice outcome in a drift-diffusion-model decision process, given
the utilities and the decision thresholds. See also `DDMSampler` for a cached version.
Inputs
------
* `u_a`: utility of choice `a` (candidate)
* `u_b`: utility of choice `b` (incumbent)
* `lbarrier`: threshold for choice `a`
* `ubarrier`: threshold for choice `b`
Outputs
-------
* `RT`: a `float` representing the response time
* `CO`: a `bool` representing the choice outcome: `True` if the proposal is accepted
(`a` was chosen), `False` otherwise (`b` was chosen)
"""
mu_ab = np.array([(u_a - u_b) / np.sqrt(2)])
lbound_normarray = np.array([lbarrier / np.sqrt(2)])
ubound_normarray = np.array([ubarrier / np.sqrt(2)])
dt = 0.000000001 # irrelevant
RT, CO = dm.rand_asym(mu_ab, -lbound_normarray, ubound_normarray, dt, 1)
return RT[0], CO[0]
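# Usage sketch (illustrative numbers, not calibrated values): one decision
# between a candidate with utility 0.3 and an incumbent with utility 0.1,
# using symmetric thresholds of 2.0:
#
#   rt, accepted = ddm_sample(0.3, 0.1, 2.0, 2.0)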
class DDMSampler:
"""
A class that helps sample response times and choice outcomes in a
drift-diffusion-model decision process, given a vector of utilities
and the decision thresholds. It works essentially like a version
of `ddm_sample` that caches the samples to improve speed (at the cost
of memory).
The constructor takes the following arguments:
* `u`: a vector of utilities
* `lb`: threshold for the incumbents
* `ub`: threshold for the candidates
* `cache_size`: the number of samples to cache for each combination of
the values in `u` (default is 50)
After creation, you can call it like a function with two arguments `a` and
`b` (two integers, the candidate and the incumbent), and it will return
a response time and a choice outcome (basically the same as `ddm_sample`).
"""
def __init__(self, u, lb, ub, cache_size = 50):
if cache_size <= 0:
raise ValueError('invalid `cache_size`, must be positive, given %i' % cache_size)
c = np.sqrt(2)
n = len(u)
self.n = n
self.mu = [np.array([(u[a] - u[b]) / c]) for a in range(n) for b in range(n)]
self.lb, self.ub = np.array([lb / c]), np.array([ub / c])
import logging
from math import pi
import numpy as np
log = logging.getLogger(__name__)
def linear(N):
return (np.linspace(-1, 1, N), np.ones(N))
def trapezoid(N):
points = np.linspace(-1.0, 1.0, N)
w = 1.0 / float(N - 1) # spacing is 2/(N-1); endpoints get half weight so the weights sum to 2
weights = np.ones(N) * 2.0 * w
weights[0] = weights[-1] = w
return (points, weights)
def periodic_trapezoid(N):
shift = 1.0 / float(N)
points = np.linspace(-1 + shift, 1 - shift, N)
weights = np.ones(N) * (2.0 / float(N))
return (points, weights)
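# Quick check (sketch): a quadrature rule on [-1, 1] should reproduce the
# interval length when integrating the constant 1, i.e. the weights sum to 2:
#
#   _, w = trapezoid(5)
#   assert abs(np.sum(w) - 2.0) < 1e-12
#   _, w = periodic_trapezoid(5)
#   assert abs(np.sum(w) - 2.0) < 1e-12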
def legendre_gauss_lobatto(N):
# Use the Chebyshev-Gauss-Lobatto nodes as the first guess
x = np.cos(np.linspace(0, pi, N))
import logging
from pathlib import Path
from typing import Union, List
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from collections import defaultdict
import scipy.stats as scst
from HPOBenchExperimentUtils import _log as _main_log
from HPOBenchExperimentUtils.utils.validation_utils import load_json_files, \
load_trajectories_as_df, df_per_optimizer
from HPOBenchExperimentUtils.utils.plotting_utils import plot_dc, color_per_opt, marker_per_opt,\
unify_layout
from HPOBenchExperimentUtils.utils.runner_utils import get_optimizer_setting, get_benchmark_settings
_main_log.setLevel(logging.DEBUG)
_log = logging.getLogger(__name__)
def plot_fidels(benchmark: str, output_dir: Union[Path, str], input_dir: Union[Path, str], opts: str,
opt_list: Union[List[str], None]=None, **kwargs):
_log.info(f'Plotting evaluated fidelities of benchmark {benchmark}')
input_dir = Path(input_dir) / benchmark
assert input_dir.is_dir(), f"Result folder doesn't exist: {input_dir}"
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
opt_rh_dc = load_trajectories_as_df(input_dir=input_dir,
which="runhistory")
if opt_list is None:
opt_list = list(opt_rh_dc.keys())
other_stats_dc = dict()
best_val = 1000
other_stats_dc["lowest"] = best_val
plt.figure(figsize=[5*len(opt_rh_dc), 5])
ax_old = None
for i, opt in enumerate(opt_rh_dc):
_log.info(f'Handling {opt}')
if opt not in opt_list:
_log.info(f'Skip {opt}')
continue
if len(opt_rh_dc[opt]) == 0: continue
other_stats_dc[opt] = defaultdict(list)
rhs = load_json_files(opt_rh_dc[opt])
for rh in rhs:
final_time = rh[-1]["finish_time"] - rh[0]["boot_time"]
bench_time = rh[-1]["total_time_used"]
calls = rh[-1]["function_call"]
other_stats_dc[opt]["final_time"].append(final_time)
other_stats_dc[opt]["bench_time"].append(bench_time)
other_stats_dc[opt]["calls"].append(calls)
df = df_per_optimizer(opt, rhs)
ax = plt.subplot(1, len(opt_rh_dc), i+1, sharey=ax_old)
thresh = 10000
if df.shape[0] > thresh:
sub = df[["fidel_values", "total_time_used"]].sample(n=thresh, random_state=1)
else:
sub = df[["fidel_values", "total_time_used"]]
avg = sub.shape[0] / len(df['id'].unique())
max_f = np.max(sub["fidel_values"])
'''
Compute classification metrics for the preference learning models. Plot the predictions.
Created on 21 Oct 2016
@author: simpson
'''
import logging
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import f1_score, roc_auc_score, log_loss, accuracy_score
from scipy.stats import kendalltau
def compute_ranking_metrics(nmethods, gold_ranks, predicted_ranks, metrics = {}, nruns=1, r=0):
if not len(metrics):
metrics['tau'] = np.zeros((nmethods, nruns))
for i in range(nmethods):
metrics['tau'][i, r], _ = kendalltau(gold_ranks, predicted_ranks[:, i])
return metrics
def compute_metrics(nmethods, gold_prefs, predictions, metrics = {}, nruns=1, r=0):
# Task C2, C4: Compute accuracy metrics ---------------------------------------------------------------------------
logging.info('Task C2/C4, accuracy metrics')
if not len(metrics):
metrics['acc'] = np.zeros((nmethods, nruns))
metrics['f1'] = np.zeros((nmethods, nruns))
metrics['auc_roc'] = np.zeros((nmethods, nruns))
metrics['log_loss'] = np.zeros((nmethods, nruns))
# Not sure how to deal with preference labels where the true label is 0.5 with f1 score. For ROC curve we can
# combine two AUCs for negative class and positive class.
for i in range(nmethods):
ind_array = np.concatenate( ( predictions[:, i:i+1] < 1.0/3.0,
(predictions[:, i:i+1] >= 1.0/3.0) & (predictions[:, i:i+1] < 2.0/3.0),
predictions[:, i:i+1] > 2.0/3.0 ), axis=1)
ind_array_gold = np.concatenate( ( gold_prefs[:, np.newaxis] == 0,
gold_prefs[:, np.newaxis] == 0.5,
gold_prefs[:, np.newaxis] == 1 ), axis=1)
mistakes = np.round(ind_array) != ind_array_gold
print(ind_array[np.any(mistakes, axis=1), :])
print(ind_array_gold[np.any(mistakes, axis=1), :])
metrics['acc'][i,r] = accuracy_score(ind_array_gold, ind_array)
metrics['f1'][i,r] = f1_score(ind_array_gold, ind_array, average='weighted')
auc_a_less_b = roc_auc_score(gold_prefs==0, 1 - predictions[:, i])
frac_a_less_b = np.sum(gold_prefs==0) / float(len(gold_prefs))
auc_a_more_b = roc_auc_score(gold_prefs==1, predictions[:, i])
frac_a_more_b = np.sum(gold_prefs==1) / float(len(gold_prefs))
auc_a_equal_b = roc_auc_score(gold_prefs==0.5, 2 * (1 - np.abs(predictions[:, i] - 0.5)))
frac_a_equal_b = np.sum(gold_prefs==0.5) / float(len(gold_prefs))
metrics['auc_roc'][i,r] = auc_a_less_b * frac_a_less_b + auc_a_more_b * frac_a_more_b + auc_a_equal_b * frac_a_equal_b
predictions_safe = predictions[:, i].copy()
predictions_safe[predictions[:, i]<1e-7] = 1e-7
predictions_safe[predictions[:, i]>(1-1e-7)] = 1 - 1e-7
metrics['log_loss'][i,r] = -np.mean(gold_prefs * np.log(predictions_safe) + (1 - gold_prefs) * np.log(1 - predictions_safe))
return metrics
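# Usage sketch (hypothetical shapes): gold_prefs is a length-N vector with
# values in {0, 0.5, 1}; predictions is an (N, nmethods) array in [0, 1].
#
#   metrics = compute_metrics(nmethods, gold_prefs, predictions, nruns=5, r=0)
#   metrics = compute_metrics(nmethods, gold_prefs, predictions, metrics, 5, 1)  # accumulate run 1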
def plot_metrics(plotdir, metrics, nmethods, method_labels, nfolds, nanno, nanno_is_min=False, xlabels=None):
# Task C9/C10: Plotting metrics -----------------------------------------------------------------------------------
logging.info('Task C9/10, plotting accuracy metrics')
_, ax = plt.subplots()
if nanno_is_min:
ax.set_title('F1 Scores with %i-fold Cross Validation (data points with at least %i annotators)' % (nfolds, nanno))
else:
ax.set_title('F1 Scores with %i-fold Cross Validation (data points with %i annotators)' % (nfolds, nanno))
ind = np.arange(nmethods)
width = 0.6
if metrics['f1'].shape[1] == 1:
ax.bar(ind, metrics['f1'], width=width)
ax.set_xlabel('Method')
ax.set_ylabel('F1 Score')
ax.set_xticks(ind + (width/2.0))
ax.set_xticklabels(method_labels)
else:
# plt.hold(True) # not needed: overplotting is the default (plt.hold was removed in matplotlib 3.0)
for m in range(nmethods):
plt.plot(metrics['f1'][m], label=method_labels[m])
if np.any(xlabels):
plt.xlabel(xlabels)
plt.legend(loc='best')
plt.savefig(plotdir + '/f1scores.eps')
_, ax = plt.subplots()
ax.set_title('AUC of ROC Curve with %i-fold Cross Validation' % nfolds)
if metrics['auc_roc'].shape[1] == 1:
ax.bar(ind, metrics['auc_roc'], width=width)
ax.set_xlabel('Method')
ax.set_ylabel('AUC')
ax.set_xticks(ind + (width/2.0))
ax.set_xticklabels(method_labels)
else:
# plt.hold(True) # not needed: overplotting is the default (plt.hold was removed in matplotlib 3.0)
for m in range(nmethods):
plt.plot(metrics['auc_roc'][m], label=method_labels[m])
if np.any(xlabels):
plt.xlabel(xlabels)
plt.legend(loc='best')
plt.savefig(plotdir + '/auc_roc.eps')
_, ax = plt.subplots()
ax.set_title('Cross Entropy Error with %i-fold Cross Validation' % nfolds)
if metrics['log_loss'].shape[1] == 1:
plt.bar(ind, metrics['log_loss'], width=width)
ax.set_xlabel('Method')
ax.set_ylabel('Cross Entropy')
ax.set_xticks(ind + (width/2.0))
ax.set_xticklabels(method_labels)
else:
# plt.hold(True) # not needed: overplotting is the default (plt.hold was removed in matplotlib 3.0)
for m in range(nmethods):
plt.plot(metrics['log_loss'][m], label=method_labels[m])
if np.any(xlabels):
plt.xlabel(xlabels)
""" lib/assess/metrics3d.py (Author: <NAME>, 2021)
Most commonly used segmentation metrics (or the ones I need for med-im projects.
Huge help from MONAI: https://www.github.com/Project-MONAI/MONAI
Usage Philosophy:
- Can take either detached cpu tensors or numpy arrays
- Returns all information (per example & per class results). It is up to
functions lower in the stack to aggregate in the form the application
requires.
e.g. batch_confusion_matrix returns a BxCx4
List of Metrics:
- Confusion Matrix (2D & 3D, Classif & Seg)
- Dice (2D & 3D, Seg)
- Jaccard (2D & 3D, Seg)
- Hausdorff (2D & 3D, Seg)
"""
import warnings
from collections import namedtuple
import numpy as np
import torch
from dmt.metrics.medpy_metrics import dc, jc, hd
from .unify import (reshape, stack, to_float, to_int, allclose, nan_to_num,
any as uni_any, sum as uni_sum)
from .seg_utils import get_mask_edges, get_surface_distance
# Module scope for multiprocessing compatibility
SegCM = namedtuple('SegCM', ('tp', 'fp', 'fn', 'tn'))
Mets = namedtuple('Mets', ('confusion', 'dice', 'jaccard', 'exists'))
def batch_metrics(preds, targs, ignore_background=False, naive_avg=True):
"""
Args:
preds (tensor or array): BxCxDxHxW (binary one-hot)
targs: (tensor or array) BxCxDxHxW (binary one-hot)
naive_avg: just set to False for inference. May skew results if
a given pred & targ batch doesn't have all classes.
"""
assert preds.shape == targs.shape
assert preds.ndim == 5
assert 'int' in str(preds.dtype)
assert 'int' in str(targs.dtype)
cdj_tuple = batch_cdj_metrics(preds, targs,
ignore_background=ignore_background)
nt_conf = cdj_tuple.confusion # named_tuple: tp, fp, tn, fn
nt_conf = SegCM(np.array(nt_conf.tp), np.array(nt_conf.fp),
np.array(nt_conf.fn), np.array(nt_conf.tn))
ec_dice = np.array(cdj_tuple.dice) # ec = element class
ec_jaccard = np.array(cdj_tuple.jaccard)
ec_exists = np.array(cdj_tuple.exists)
with np.errstate(divide='ignore', invalid='ignore'):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# #########################################################################
# Copyright (c) 2020, UChicago Argonne, LLC. All rights reserved. #
# #
# Copyright 2020. UChicago Argonne, LLC. This software was produced #
# under U.S. Government contract DE-AC02-06CH11357 for Argonne National #
# Laboratory (ANL), which is operated by UChicago Argonne, LLC for the #
# U.S. Department of Energy. The U.S. Government has rights to use, #
# reproduce, and distribute this software. NEITHER THE GOVERNMENT NOR #
# UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR #
# ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE. If software is #
# modified to produce derivative works, such modified software should #
# be clearly marked, so as not to confuse it with the version available #
# from ANL. #
# #
# Additionally, redistribution and use in source and binary forms, with #
# or without modification, are permitted provided that the following #
# conditions are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of UChicago Argonne, LLC, Argonne National #
# Laboratory, ANL, the U.S. Government, nor the names of its #
# contributors may be used to endorse or promote products derived #
# from this software without specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago #
# Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; #
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
# #########################################################################
import warnings
from orangecontrib.wonder.fit.parameters.measured_data.phase import Phase
from orangecontrib.wonder.fit.parameters.initialization.fft_parameters import FFTTypes
from orangecontrib.wonder.fit.parameters.instrument.zero_error import ZeroError
from orangecontrib.wonder.fit.parameters.instrument.lab6_tan_correction import Lab6TanCorrection
from orangecontrib.wonder.fit.parameters.instrument.caglioti import Caglioti
from orangecontrib.wonder.fit.parameters.instrument.polarization_parameters import Beampath, LorentzFormula, PolarizationParameters
from orangecontrib.wonder.fit.parameters.instrument.speciment_displacement import SpecimenDisplacement
from orangecontrib.wonder.fit.parameters.thermal.thermal_parameters import ThermalParameters
from orangecontrib.wonder.fit.parameters.instrument.background_parameters import ChebyshevBackground, ExpDecayBackground
from orangecontrib.wonder.fit.parameters.microstructure.strain import InvariantPAH, WarrenModel, KrivoglazWilkensModel
from orangecontrib.wonder.fit.parameters.measured_data.phase_gsasii import GSASIIPhase
from orangecontrib.wonder.fit.functions.gsasii_functions import gsasii_intensity_factor
from orangecontrib.wonder.fit.parameters.additional.pseudo_voigt_peak import SpuriousPeaks
from orangecontrib.wonder.fit.functions.chemical_formula_parser import ChemicalFormulaParser
from orangecontrib.wonder.util.fit_utilities import Utilities, Symmetry
warnings.filterwarnings("ignore", category=RuntimeWarning)
class Distribution:
DELTA = "delta"
LOGNORMAL = "lognormal"
GAMMA = "gamma"
YORK = "york"
@classmethod
def tuple(cls):
return [cls.DELTA, cls.LOGNORMAL, cls.GAMMA, cls.YORK]
class Shape:
NONE = "none"
SPHERE = "sphere"
CUBE = "cube"
TETRAHEDRON = "tetrahedron"
OCTAHEDRON = "octahedron"
CYLINDER = "cylinder"
WULFF = "wulff solid"
@classmethod
def tuple(cls):
return [cls.NONE, cls.SPHERE, cls.CUBE, cls.TETRAHEDRON, cls.OCTAHEDRON, cls.CYLINDER, cls.WULFF]
class WulffCubeFace:
TRIANGULAR = "triangular"
HEXAGONAL = "hexagonal"
@classmethod
def tuple(cls):
return [cls.TRIANGULAR, cls.HEXAGONAL]
class Normalization:
NORMALIZE_TO_N = 0
NORMALIZE_TO_N2 = 1
@classmethod
def tuple(cls):
return ["to N", "to N\u00b2"]
def __H_invariant_square(h, k, l):
numerator = (h * h * k * k + k * k * l * l + l * l * h * h)
denominator = (h**2 + k**2 + l**2)**2
return numerator / denominator
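# This is the cubic anisotropy invariant
# H^2 = (h^2 k^2 + k^2 l^2 + l^2 h^2) / (h^2 + k^2 + l^2)^2,
# used for hkl-dependent broadening; e.g. H^2 = 0 for (100) and 1/3 for (111).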
def __merge_functions(list_of_pairs, s):
# x step must be the same for all functions
intensity = numpy.zeros(len(s))
for pair_index in range(list_of_pairs.shape[0]):
intensity += numpy.interp(s, list_of_pairs[pair_index, 0], list_of_pairs[pair_index, 1])
return intensity
#################################################
#
# FIT FUNCTION
#
#################################################
def fit_function_direct(twotheta, fit_global_parameters, diffraction_pattern_index = 0):
incident_radiation = fit_global_parameters.measured_dataset.get_incident_radiations_item(diffraction_pattern_index)
wavelength = incident_radiation.wavelength.value
I = fit_function_reciprocal(Utilities.s(0.5*numpy.radians(twotheta), wavelength),
fit_global_parameters,
diffraction_pattern_index)
# POLARIZATION FACTOR --------------------------------------------------------------------------------------
polarization_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(PolarizationParameters.__name__, diffraction_pattern_index)
if not polarization_parameters is None:
if polarization_parameters.use_polarization_factor:
twotheta_mono = polarization_parameters.twotheta_mono
I *= polarization_factor(numpy.radians(twotheta),
None if twotheta_mono is None else numpy.radians(twotheta_mono),
polarization_parameters.degree_of_polarization,
polarization_parameters.beampath)
# ADD BACKGROUNDS ---------------------------------------------------------------------------------------------
if not fit_global_parameters.background_parameters is None:
for key in fit_global_parameters.background_parameters.keys():
background_parameters = fit_global_parameters.get_background_parameters_item(key, diffraction_pattern_index)
if not background_parameters is None:
if key == ChebyshevBackground.__name__:
parameters=[background_parameters.c0.value,
background_parameters.c1.value,
background_parameters.c2.value,
background_parameters.c3.value,
background_parameters.c4.value,
background_parameters.c5.value,
background_parameters.c6.value,
background_parameters.c7.value,
background_parameters.c8.value,
background_parameters.c9.value]
add_chebyshev_background(twotheta, I, parameters)
elif key == ExpDecayBackground.__name__:
add_expdecay_background(twotheta,
I,
parameters=[background_parameters.a0.value,
background_parameters.b0.value,
background_parameters.a1.value,
background_parameters.b1.value,
background_parameters.a2.value,
background_parameters.b2.value])
# ADD PSEUDO VOIGTS ---------------------------------------------------------------------------------------------
if not fit_global_parameters.additional_parameters is None:
for key in fit_global_parameters.additional_parameters.keys():
additional_parameters = fit_global_parameters.get_additional_parameters_item(key, diffraction_pattern_index)
if not additional_parameters is None:
if key == SpuriousPeaks.__name__:
add_pseudo_voigt_peaks(twotheta, I, additional_parameters)
return I
def fit_function_reciprocal(s, fit_global_parameters, diffraction_pattern_index = 0):
line_profile = fit_global_parameters.measured_dataset.get_line_profile(diffraction_pattern_index)
incident_radiation = fit_global_parameters.measured_dataset.get_incident_radiations_item(diffraction_pattern_index)
# CONSTRUCTION OF EACH SEPARATE PEAK ---------------------------------------------------------------------------
phases_number = fit_global_parameters.measured_dataset.get_phases_number()
separated_phases_intensities = numpy.full((phases_number, 2), None)
for phase_index in range(phases_number):
phase = fit_global_parameters.measured_dataset.get_phase(phase_index)
if not Phase.is_cube(phase.symmetry): raise ValueError("Only Cubic structures are supported by fit")
reflections_number = line_profile.get_reflections_number(phase_index)
separated_peaks_functions = numpy.full((reflections_number, 2), None)
for reflection_index in range(reflections_number):
if isinstance(phase, GSASIIPhase):
s_analytical, intensity_analytical = create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters,
gsas_reflections_list=line_profile.get_additional_parameters_of_phase(phase_index))
else:
s_analytical, intensity_analytical = create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters)
separated_peaks_functions[reflection_index, 0] = s_analytical
separated_peaks_functions[reflection_index, 1] = intensity_analytical
# INTERPOLATION ONTO ORIGINAL S VALUES -------------------------------------------------------------------------
intensity_phase = __merge_functions(separated_peaks_functions, s)
# ADD SAXS
size_parameters = fit_global_parameters.get_size_parameters(phase_index)
if not size_parameters is None and size_parameters.active:
if size_parameters.distribution == Distribution.DELTA and size_parameters.add_saxs:
if not phase.use_structure: raise NotImplementedError("SAXS is only available when the structural model is active")
intensity_phase += saxs(s,
size_parameters.mu.value,
phase.a.value,
phase.formula,
phase.symmetry,
size_parameters.normalize_to)
# ADD DEBYE-WALLER FACTOR --------------------------------------------------------------------------------------
thermal_parameters = fit_global_parameters.get_thermal_parameters_item(ThermalParameters.__name__, diffraction_pattern_index)
if not thermal_parameters is None:
debye_waller_factor = thermal_parameters.get_debye_waller_factor(phase_index)
if not debye_waller_factor is None:
intensity_phase *= debye_waller(s, debye_waller_factor.value*0.01) # from A-2 to nm-2
separated_phases_intensities[phase_index, 0] = s
separated_phases_intensities[phase_index, 1] = intensity_phase
intensity = __merge_functions(separated_phases_intensities, s)
if not incident_radiation.is_single_wavelength:
principal_wavelength = incident_radiation.wavelength
intensity_scaled = intensity*incident_radiation.get_principal_wavelenght_weight()
separated_secondary_intensities = numpy.full((len(incident_radiation.secondary_wavelengths), 2), None)
secondary_index = 0
for secondary_wavelength, secondary_wavelength_weigth in zip(incident_radiation.secondary_wavelengths,
incident_radiation.secondary_wavelengths_weights):
s_secondary = s * secondary_wavelength.value/principal_wavelength.value
separated_secondary_intensities[secondary_index, 0] = s_secondary
separated_secondary_intensities[secondary_index, 1] = intensity*secondary_wavelength_weigth.value
secondary_index += 1
intensity = intensity_scaled + __merge_functions(separated_secondary_intensities, s)
return intensity
#################################################
# FOURIER FUNCTIONS
#################################################
class FourierTranformFactory:
@classmethod
def get_fourier_transform(cls, type=FFTTypes.REAL_ONLY):
if type == FFTTypes.REAL_ONLY:
return FourierTransformRealOnly
elif type == FFTTypes.FULL:
return FourierTransformFull
else:
raise ValueError("Type not recognized")
class FourierTransform:
@classmethod
def fft(cls, f, n_steps, dL):
raise NotImplementedError()
@classmethod
def get_empty_fft(cls, n_steps, dL):
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
I = numpy.zeros(len(s))
I[int(len(s)/2)] = 1.0
return s, I
class FourierTransformRealOnly(FourierTransform):
@classmethod
def _real_absolute_fourier(cls, y):
return numpy.fft.fftshift(numpy.abs(numpy.real(numpy.fft.fft(y))))
@classmethod
def _fft_normalized(cls, y_fft, n_steps, dL):
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
integral = numpy.trapz(y_fft, s)
return s, y_fft / integral
@classmethod
def fft(cls, f, n_steps, dL):
return cls._fft_normalized(cls._real_absolute_fourier(f), n_steps, dL)
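# Usage sketch (illustrative grid values): transform a Gaussian column of
# Fourier amplitudes into a normalized peak profile.
#
#   ft = FourierTranformFactory.get_fourier_transform(FFTTypes.REAL_ONLY)
#   L = numpy.linspace(0.0, 200.0, 4096)
#   s, I = ft.fft(numpy.exp(-(L / 20.0) ** 2), n_steps=4096, dL=L[1] - L[0])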
from scipy.integrate import simps
class FourierTransformFull(FourierTransform):
@classmethod
def _full_fourier(cls, y):
return numpy.fft.fftshift(numpy.fft.fft(y))
@classmethod
def _fft_shifted(cls, y_fft, n_steps, dL):
s = numpy.fft.fftfreq(n_steps, dL)
s = numpy.fft.fftshift(s)
y_fft -= y_fft[0]
return s, y_fft
@classmethod
def _fft_real(cls, f, n_steps, dL):
return cls._fft_shifted(numpy.real(cls._full_fourier(f)), n_steps, dL)
@classmethod
def _fft_imag(cls, f, n_steps, dL):
return cls._fft_shifted(numpy.imag(cls._full_fourier(f)), n_steps, dL)
@classmethod
def _normalize(cls, s, i):
return s, i/simps(i, s)
@classmethod
def fft(cls, f, n_steps, dL):
sr, fft_real = cls._fft_real(numpy.real(f), n_steps, dL)
si, fft_imag = cls._fft_imag(numpy.imag(f), n_steps, dL)
return cls._normalize(sr, fft_real - fft_imag)
#################################################
# COMPUTATION OF A SINGLE PEAK
#################################################
def create_one_peak(diffraction_pattern_index,
phase_index,
reflection_index,
incident_radiation,
phase,
line_profile,
fit_global_parameters,
gsas_reflections_list=None):
fft_type = fit_global_parameters.fit_initialization.fft_parameters.fft_type
fit_space_parameters = fit_global_parameters.space_parameters()
reflection = line_profile.get_reflection(phase_index, reflection_index)
wavelength = incident_radiation.wavelength.value
lattice_parameter = phase.a.value
fourier_amplitudes = None
# INSTRUMENTAL PROFILE ---------------------------------------------------------------------------------------------
instrumental_profile_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(Caglioti.__name__, diffraction_pattern_index)
if not instrumental_profile_parameters is None:
if fourier_amplitudes is None:
fourier_amplitudes = instrumental_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
wavelength,
instrumental_profile_parameters.U.value,
instrumental_profile_parameters.V.value,
instrumental_profile_parameters.W.value,
instrumental_profile_parameters.a.value,
instrumental_profile_parameters.b.value,
instrumental_profile_parameters.c.value)
else:
fourier_amplitudes *= instrumental_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
wavelength,
instrumental_profile_parameters.U.value,
instrumental_profile_parameters.V.value,
instrumental_profile_parameters.W.value,
instrumental_profile_parameters.a.value,
instrumental_profile_parameters.b.value,
instrumental_profile_parameters.c.value)
# SIZE -------------------------------------------------------------------------------------------------------------
size_parameters = fit_global_parameters.get_size_parameters(phase_index)
if not size_parameters is None and size_parameters.active:
if size_parameters.distribution == Distribution.LOGNORMAL:
if size_parameters.shape == Shape.SPHERE:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_lognormal(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_lognormal(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
elif size_parameters.shape == Shape.WULFF:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_wulff_solids_lognormal(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
size_parameters.sigma.value,
size_parameters.mu.value,
size_parameters.truncation.value,
size_parameters.cube_face)
else:
fourier_amplitudes *=size_function_wulff_solids_lognormal(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
size_parameters.sigma.value,
size_parameters.mu.value,
size_parameters.truncation.value,
size_parameters.cube_face)
elif size_parameters.distribution == Distribution.GAMMA:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_gamma(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_gamma(fit_space_parameters.L,
size_parameters.sigma.value,
size_parameters.mu.value)
elif size_parameters.distribution == Distribution.DELTA:
if fourier_amplitudes is None:
fourier_amplitudes = size_function_delta(fit_space_parameters.L,
size_parameters.mu.value)
else:
fourier_amplitudes *= size_function_delta(fit_space_parameters.L,
size_parameters.mu.value)
# STRAIN -----------------------------------------------------------------------------------------------------------
strain_parameters = fit_global_parameters.get_strain_parameters(phase_index)
if not strain_parameters is None and strain_parameters.active:
if isinstance(strain_parameters, InvariantPAH): # INVARIANT PAH
if fourier_amplitudes is None:
fourier_amplitudes = strain_invariant_function_pah(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.aa.value,
strain_parameters.bb.value,
strain_parameters.get_invariant(reflection.h,
reflection.k,
reflection.l))
else:
fourier_amplitudes *= strain_invariant_function_pah(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.aa.value,
strain_parameters.bb.value,
strain_parameters.get_invariant(reflection.h,
reflection.k,
reflection.l))
elif isinstance(strain_parameters, KrivoglazWilkensModel): # KRIVOGLAZ-WILKENS
if fourier_amplitudes is None:
fourier_amplitudes = strain_krivoglaz_wilkens(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.rho.value,
strain_parameters.Re.value,
strain_parameters.Ae.value,
strain_parameters.Be.value,
strain_parameters.As.value,
strain_parameters.Bs.value,
strain_parameters.mix.value,
strain_parameters.b.value)
else:
fourier_amplitudes *= strain_krivoglaz_wilkens(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.rho.value,
strain_parameters.Re.value,
strain_parameters.Ae.value,
strain_parameters.Be.value,
strain_parameters.As.value,
strain_parameters.Bs.value,
strain_parameters.mix.value,
strain_parameters.b.value)
elif isinstance(strain_parameters, WarrenModel): # WARREN
fourier_amplitudes_re, fourier_amplitudes_im = strain_warren_function(fit_space_parameters.L,
reflection.h,
reflection.k,
reflection.l,
lattice_parameter,
strain_parameters.average_cell_parameter.value)
if fft_type == FFTTypes.FULL:
if fourier_amplitudes is None:
fourier_amplitudes = fourier_amplitudes_re + 1j*fourier_amplitudes_im
else:
fourier_amplitudes = (fourier_amplitudes*fourier_amplitudes_re) + 1j*(fourier_amplitudes*fourier_amplitudes_im)
elif fft_type == FFTTypes.REAL_ONLY:
if fourier_amplitudes is None:
fourier_amplitudes = fourier_amplitudes_re
else:
fourier_amplitudes *= fourier_amplitudes_re
# FFT -----------------------------------------------------------------------------------------------------------
if not fourier_amplitudes is None:
s, I = FourierTranformFactory.get_fourier_transform(fft_type).fft(fourier_amplitudes,
n_steps=fit_global_parameters.fit_initialization.fft_parameters.n_step,
dL=fit_space_parameters.dL)
else:
s, I = FourierTransform.get_empty_fft(n_steps=fit_global_parameters.fit_initialization.fft_parameters.n_step,
dL=fit_space_parameters.dL)
s_hkl = Utilities.s_hkl(lattice_parameter, reflection.h, reflection.k, reflection.l)
s += s_hkl
# INTENSITY MODULATION: STRUCTURAL MODEL YES/NO --------------------------------------------------------------------
if phase.use_structure:
if isinstance(phase, GSASIIPhase):
I *= phase.intensity_scale_factor.value * gsasii_intensity_factor(reflection.h,
reflection.k,
reflection.l,
gsas_reflections_list)
else:
I *= phase.intensity_scale_factor.value * \
multiplicity_cubic(reflection.h, reflection.k, reflection.l) * \
squared_modulus_structure_factor(s_hkl,
phase.formula,
reflection.h,
reflection.k,
reflection.l,
phase.symmetry)
else:
I *= reflection.intensity.value
#TODO: add TDS handling, with dedicated data structures + an ad-hoc widget
# PEAK SHIFTS -----------------------------------------------------------------------------------------------------
if not fit_global_parameters.shift_parameters is None:
theta = Utilities.theta(s, wavelength)
for key in fit_global_parameters.shift_parameters.keys():
shift_parameters = fit_global_parameters.get_shift_parameters_item(key, diffraction_pattern_index)
if not shift_parameters is None:
if key == Lab6TanCorrection.__name__:
s += lab6_tan_correction(theta, wavelength,
shift_parameters.ax.value,
shift_parameters.bx.value,
shift_parameters.cx.value,
shift_parameters.dx.value,
shift_parameters.ex.value)
elif key == ZeroError.__name__:
s += Utilities.s(shift_parameters.shift.value/2, wavelength)
elif key == SpecimenDisplacement.__name__:
s += specimen_displacement(theta, wavelength, shift_parameters.goniometer_radius, shift_parameters.displacement.value*1e-6) # to m
# LORENTZ FACTOR --------------------------------------------------------------------------------------
if not fit_global_parameters.instrumental_profile_parameters is None:
polarization_parameters = fit_global_parameters.get_instrumental_profile_parameters_item(PolarizationParameters.__name__, diffraction_pattern_index)
if not polarization_parameters is None:
if polarization_parameters.use_lorentz_factor:
if polarization_parameters.lorentz_formula == LorentzFormula.Shkl_Shkl:
I *= lorentz_factor_simplified_normalized(s_hkl, wavelength)
elif polarization_parameters.lorentz_formula == LorentzFormula.S_Shkl:
I *= lorentz_factor_normalized(s, s_hkl, wavelength)
return s, I
######################################################################
# WPPM FUNCTIONS
######################################################################
import numpy
from scipy.special import erfc
import os
# performance improvement
######################################################################
# THERMAL AND POLARIZATION
######################################################################
def debye_waller(s, B):
return numpy.exp(-0.5*B*(s**2)) # it's the exp(-2M) = exp(-Bs^2/2)
def lorentz_factor(s, s_hkl):
return 1/(s*s_hkl)
def lorentz_factor_normalized(s, s_hkl, wavelength):
return lorentz_factor(s, s_hkl)/numpy.sqrt(1 - (s*wavelength/2)**2)
def lorentz_factor_simplified(s_hkl):
return 1/(s_hkl**2)
def lorentz_factor_simplified_normalized(s_hkl, wavelength):
return lorentz_factor_simplified(s_hkl)/numpy.sqrt(1 - (s_hkl*wavelength/2)**2)
def polarization_factor(twotheta, twotheta_mono, degree_of_polarization, beampath):
Q = degree_of_polarization
if twotheta_mono is None or twotheta_mono == 0.0:
return ((1+Q) + (1-Q)*(numpy.cos(twotheta)**2))/2
else:
if beampath == Beampath.PRIMARY:
return ((1+Q) + (1-Q)*(numpy.cos(twotheta_mono)**2)*(numpy.cos(twotheta)**2))/2
""" Unit tests for the Permutation explainer.
"""
# pylint: disable=missing-function-docstring
import pickle
import numpy as np
import shap
from . import common
def test_exact_second_order():
""" This tests that the Perumtation explain gives exact answers for second order functions.
"""
np.random.seed(0)
import nose
import copy
import numpy as np
from scipy.sparse import csr_matrix
import pycuda.autoinit
import pycuda.gpuarray as gpu
from pycuda.driver import Stream
from binet import cusparse
from binet.gpucsrarray import *
from numpy.testing import assert_allclose, assert_array_equal
from nose.tools import nottest
init()
def test_gpucsrarray():
A = np.random.laplace(size=(5, 3))
#!/usr/bin/env python
"""
wavelength.py
===============
::
ARG=6 ipython -i wavelength.py
ARG=7 ipython -i wavelength.py
ARG=8 ipython -i wavelength.py
ARG=11 ipython -i wavelength.py
ARG=12 ipython -i wavelength.py
ARG=13 ipython -i wavelength.py
ARG=15 ipython -i wavelength.py
"""
import os, numpy as np, logging
log = logging.getLogger(__name__)
from opticks.ana.key import keydir
from opticks.ana.nbase import chi2
class Wavelength(object):
"""
Comparing wavelength distribs between many different samples
"""
FOLD = "/tmp/wavelength"
def get_key(self, label):
key = None
for k,v in self.l.items():
if v == label:
key = k
pass
pass
return key
def get_keys(self, a_label, b_label):
a = self.get_key(a_label)
b = self.get_key(b_label)
return a, b
def __call__(self, label):
return self.get_key(label)
def format(self, i):
return " %2d : %d : %50s : %s " % (i, os.path.exists(self.p[i]), self.l[i], self.p[i])
def __init__(self, kd):
p = {}
l = {}
l[0] = "DsG4Scintillator_G4OpticksAnaMgr" ## horses mouth
p[0] = "/tmp/G4OpticksAnaMgr/WavelengthSamples.npy"
l[1] = "Opticks_QCtxTest_hd20"
p[1] = os.path.join("/tmp/QCtxTest", "wavelength_20.npy")
l[2] = "Opticks_QCtxTest_hd0"
p[2] = os.path.join("/tmp/QCtxTest", "wavelength_0.npy")
l[3] = "Opticks_QCtxTest_hd20_cudaFilterModePoint"
p[3] = os.path.join("/tmp/QCtxTest", "wavelength_20_cudaFilterModePoint.npy")
l[4] = "Opticks_QCtxTest_hd0_cudaFilterModePoint"
p[4] = os.path.join("/tmp/QCtxTest", "wavelength_0_cudaFilterModePoint.npy")
l[5] = "X4"
p[5] = "/tmp/X4ScintillationTest/g4localSamples.npy"
l[6] = "GScintillatorLib_np_interp"
p[6] = os.path.join(kd,"GScintillatorLib/GScintillatorLib.npy")
l[7] = "ck_photon_1M"
p[7] = os.path.join("/tmp/QCtxTest", "cerenkov_photon_1000000.npy")
l[8] = "G4Cerenkov_modified_SKIP_CONTINUE"
p[8] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_step_length_100000.000_SKIP_CONTINUE", "GenWavelength.npy")
l[9] = "G4Cerenkov_modified_ASIS"
p[9] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_step_length_100000.000_ASIS", "GenWavelength.npy")
l[10] = "G4Cerenkov_modified_SKIP_CONTINUE_10k"
p[10] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_override_fNumPhotons_10000_SKIP_CONTINUE", "GenWavelength.npy")
l[11] = "G4Cerenkov_modified_SKIP_CONTINUE_1M"
p[11] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_override_fNumPhotons_1000000_SKIP_CONTINUE", "GenWavelength.npy")
l[12] = "G4Cerenkov_modified_SKIP_CONTINUE_1M_seed1"
p[12] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_override_fNumPhotons_1000000_SKIP_CONTINUEseed_1_", "GenWavelength.npy")
l[13] = "G4Cerenkov_modified_SKIP_CONTINUE_1M_seed2"
p[13] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_override_fNumPhotons_1000000_SKIP_CONTINUEseed_2_", "GenWavelength.npy")
l[14] = "G4Cerenkov_modified_SKIP_CONTINUE_1M_FLOAT_TEST"
p[14] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_override_fNumPhotons_1000000_SKIP_CONTINUE_FLOAT_TEST", "GenWavelength.npy")
l[15] = "ck_photon_1M_FLIP_RANDOM"
p[15] = os.path.join("/tmp/QCtxTest", "cerenkov_photon_FLIP_RANDOM_1000000.npy")
l[16] = "G4Cerenkov_modified_SKIP_CONTINUE_1M_seed1f"
p[16] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_override_fNumPhotons_1000000_SKIP_CONTINUE_FLOAT_TEST_seed_1_", "GenWavelength.npy")
l[17] = "G4Cerenkov_modified_SKIP_CONTINUE_1M_seed2f"
p[17] = os.path.join("/tmp/G4Cerenkov_modifiedTest", "BetaInverse_1.500_override_fNumPhotons_1000000_SKIP_CONTINUE_FLOAT_TEST_seed_2_", "GenWavelength.npy")
l[18] = "ana_ck_1M"
p[18] = "/tmp/ck/ck_1000000.npy"
l[19] = "G4Cerenkov_modified_SKIP_CONTINUE_1M_PRECOOKED"
p[19] = "/tmp/G4Cerenkov_modifiedTest/BetaInverse_1.500_override_fNumPhotons_1000000_SKIP_CONTINUE_PRECOOKED/GenWavelength.npy"
l[20] = "ck_photon_enprop_1M"
p[20] = os.path.join("/tmp/QCtxTest", "cerenkov_photon_enprop_1000000.npy")
l[21] = "ck_photon_expt_1M"
p[21] = os.path.join("/tmp/QCtxTest", "cerenkov_photon_expt_1000000.npy")
l[22] = "rindex_en_integrated_1M"
p[22] = "/tmp/rindex/en_integrated_lookup_1M.npy"
self.p = p
self.l = l
dom = np.arange(80, 400, 4)
#dom = np.arange(300, 600, 1)
#dom = np.arange(385, 475, 1)
#dom = np.arange(350, 550, 1)
a = {}
e = {}
w = {}
h = {}
r = {}
for i in range(len(l)):
p_exists = os.path.exists(p[i])
print(self.format(i))
if not p_exists:
a[i] = None
w[i] = None
h[i] = None
else:
a[i] = np.load(p[i])
if l[i].startswith("ck_photon"):
e[i] = a[i][:,0,0]
w[i] = a[i][:,0,1]
r[i] = a[i][:,0,2]
elif l[i].startswith("rindex_en_integrated"):
e[i] = a[i]
w[i] = 1240./e[i]
r[i] = np.zeros( len(a[i]) )
elif l[i].startswith("ana_ck"):
w[i] = a[i][:,0,1]
elif l[i].startswith("G4Cerenkov_modified"):
e[i] = a[i][:,0,0]
w[i] = a[i][:,0,1]
r[i] = a[i][:,0,2]
elif l[i] == "GScintillatorLib_np_interp":
aa = a[i]
self.aa = aa
aa0 = aa[0,:,0]
bb0 = np.linspace(0,1,len(aa0))
u = np.random.rand(1000000)
import gc
import six
import numpy as np
from abc import ABCMeta, abstractmethod
from .base import BaseEstimator
class BaseSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
def __init__(self, kernel='linear', degree=3, C=1.0, epsilon=1e-3, max_iter=100):
self.max_iter = max_iter
self._kernel = kernel
self.degree = degree
self.C = C
self.epsilon = epsilon
# explicitly initialize the variables
def init(self, X: np.ndarray, y: np.ndarray):
self.m, self.n = X.shape
self.X = X
self.y = y
self.b = 0.0
self.K = self.kernel_mat(X, X) # fully precomputed kernel matrix
self.alpha = np.zeros(self.m)
self.g_v = self.g_vec()
self.E = self.g_v - self.y # cache every E_i in one array
# g(x), evaluated at input xi (X[i])
def g(self, i):
in_sigma = self.alpha * self.y * self.K[i]
self.g_v[i] = np.sum(in_sigma) + self.b
return self.g_v[i]
# names ending in "vec" are functions; names ending in "v" are cached vectors
def g_vec(self):
before_sigma = self.K * self.alpha * self.y
return np.sum(before_sigma, axis=-1) + self.b
# E(x) is the difference between the prediction g(x) and the label y
def _e(self, i):
return self.g(i) - self.y[i]
# kernel function
def kernel(self, x1: np.ndarray, x2: np.ndarray):
if self._kernel == 'linear':
return np.sum(x1 * x2)
elif self._kernel == 'poly':
return (np.sum(x1 * x2) + 1) ** self.degree
return 0
# vectorized kernel: evaluate xi against all x in one call
def kernel_vec(self, x: np.ndarray): # TODO: add more kernels
if self._kernel == 'linear':
return np.sum(self.X * x, axis=-1)
elif self._kernel == 'poly':
return (np.sum(self.X * x, axis=-1) + 1) ** self.degree
return None
# precompute the whole kernel matrix up front so it can be looked up later
def kernel_mat(self, X1: np.ndarray, X2: np.ndarray):
x1 = X1[np.newaxis, ...]
x2 = X2[:, np.newaxis, :]
if self._kernel == 'linear':
return np.sum(x1 * x2, axis=-1) # broadcasting; sum over the last axis
elif self._kernel == 'poly':
return (np.sum(x1 * x2, axis=-1) + 1) ** self.degree
return None
# select alpha1 and alpha2, returning their indices
def select_alpha(self):
_a = self.alpha
# build the masks
con1, con2 = (_a > 0), (_a < self.C)
# yi*g(xi)
ygx = self.y * self.g_v
# KKT condition: αi == 0 and yi*gxi ≥ 1-ε
err1 = ygx - 1 + self.epsilon
err1[(con1 & (err1 <= 0)) | (~con1 & (err1 > 0))] = 0 # zero out entries not in this case or already satisfying it
# 0 < αi < C and abs(yi*gxi - 1) ≤ ε
err2 = np.abs(ygx - 1) - self.epsilon
err2[~con1 | ~con2] = 0 # zero out entries with αi ≤ 0 or αi ≥ C
# αi == C and yi*gxi ≤ 1+ε
err3 = ygx - 1 - self.epsilon
err3[(con2 & (err3 >= 0)) | (~con2 & (err3 < 0))] = 0
# total violation; the index comes from the largest entry
err = err1 ** 2 + err2 ** 2 + err3 ** 2
# α1 is the point violating the KKT conditions most severely
i1 = np.argmax(err)
# if E1 is positive pick the smallest E; if E1 is negative pick the largest
i2 = np.argmin(self.E) if self.E[i1] >= 0 else np.argmax(self.E)
if i2 == i1: # if they coincide, take the second smallest/largest instead
if self.E[i1] >= 0:
i2 = np.argsort(self.E)[1]
else:
i2 = np.argsort(self.E)[-2]
return i1, i2
# optimize α1 and α2, then update b
def optimize(self, i1, i2):
a1_old, a2_old, b_old = self.alpha[i1], self.alpha[i2], self.b
_y, _K = self.y, self.K
# clipping bounds L and H for α2
if _y[i1] == _y[i2]:
L = max(0, a2_old + a1_old - self.C)
H = min(self.C, a2_old + a1_old)
else:
L = max(0, a2_old - a1_old)
H = min(self.C, self.C + a2_old - a1_old)
E1, E2 = self.E[i1], self.E[i2]
eta = _K[i1, i1] + _K[i2, i2] - 2 * _K[i1, i2] # 7.107 η = K11 + K22 - 2K12
if eta <= 0:
print('eta <= 0')
return
a2_new_unc = a2_old + _y[i2] * (E1 - E2) / eta # 7.106
def cut_alpha(a, h, l): # 7.108
if a > h:
return h
elif a < l:
return l
else:
return a
a2_new = cut_alpha(a2_new_unc, H, L)
a1_new = a1_old + _y[i1] * _y[i2] * (a2_old - a2_new) # 7.109
b1_new = -E1 - _y[i1] * _K[i1, i1] * (a1_new - a1_old) - \
_y[i2] * _K[i2, i1] * (a2_new - a2_old) + b_old # 7.115
b2_new = -E2 - _y[i1] * _K[i1, i2] * (a1_new - a1_old) - \
_y[i2] * _K[i2, i2] * (a2_new - a2_old) + b_old # 7.116
if 0 < a1_new < self.C:
b_new = b1_new
elif 0 < a2_new < self.C:
b_new = b2_new
else: # take the midpoint (when an α is strictly inside (0, C), b1 == b2 anyway)
b_new = (b1_new + b2_new) / 2
def e_new(i):
in_sigma = self.alpha * self.y * self.K[i]
in_sigma[self.alpha <= 0] = 0
in_sigma[self.alpha > self.C] = 0
return np.sum(in_sigma) + self.b - self.y[i]
# update the parameters
self.alpha[i1], self.alpha[i2] = a1_new, a2_new
self.b = b_new
self.E[i1], self.E[i2] = e_new(i1), e_new(i2)
return None
# stopping criterion
def shutdown(self):
c, eps = self.C, self.epsilon
_a, _y = self.alpha, self.y
if np.any(_a < 0) | np.any(_a > c):
return False
elif np.abs(np.sum(_a * _y)) > eps:
return False
else:
ygx = _y * self.g_v
if np.any(ygx[_a == 0] < 1 - eps):
return False
elif np.any(np.abs(ygx[(0 < _a) & (_a < c)] - 1) > eps):
return False
elif np.any(ygx[_a == c] > 1 + eps):
return False
return True
def fit(self, X: np.ndarray, y: np.ndarray):
self.init(X, y)
for t in range(self.max_iter):
i1, i2 = self.select_alpha() # choose the two variables to optimize
self.optimize(i1, i2) # solve the two-variable optimization subproblem
if self.shutdown(): # stopping criterion
# print('Early stop')
break
return
def predict(self, X):
fx = self.f(X)
fx[fx >= 0] = 1
fx[fx < 0] = -1
return fx
# decision function
def f(self, X):
k_mat = self.kernel_mat(self.X, X) # shape: n_test, n_train
in_sigma = k_mat * self.alpha * self.y # broadcasting
return np.sum(in_sigma, axis=-1) + self.b
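# Usage sketch (assumes features X of shape (n, d) and labels y in {-1, +1}):
#
#   clf = BaseSVM(kernel='linear', C=1.0, max_iter=100)
#   clf.fit(X, y)
#   y_hat = clf.predict(X)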
class SVC(BaseSVM):
def __init__(self, kernel='linear', degree=3, C=1.0, epsilon=1e-3,
max_iter=100, decision_function_shape='ovr'):
super(SVC, self).__init__(kernel=kernel, degree=degree, C=C,
epsilon=epsilon, max_iter=max_iter)
self.decision_function_shape = decision_function_shape
# build the binary-classification data for one class (or class pair)
def get_xyi(self, X, y, i, j=0):
xi, yi = X.copy(), y.copy()
if self.decision_function_shape == 'ovr':
yi[yi != i] = -1
yi[yi == i] = 1
elif self.decision_function_shape == 'ovo':
pi, pj = yi == i, yi == j
xi, yi = xi[pi | pj], yi[pi | pj]
yi[yi == j] = -1
yi[yi == i] = 1
return xi, yi
def fit(self, X: np.ndarray, y: np.ndarray):
self.n_classes = len(set(y.tolist()))
if self.decision_function_shape == 'ovr':
self.K = self.kernel_mat(X, X)
self.svms = list()
for i in range(self.n_classes):
svm = BaseSVM(self._kernel, self.degree, self.C, self.epsilon, self.max_iter)
svm.fit(*self.get_xyi(X, y, i))
del svm.K
gc.collect()
svm.K = self.K
self.svms.append(svm)
elif self.decision_function_shape == 'ovo':
self.svmd = dict()
for i in range(self.n_classes):
for j in range(i + 1, self.n_classes):
svm = BaseSVM(self._kernel, self.degree, self.C, self.epsilon, self.max_iter)
xij, yij = self.get_xyi(X, y, i, j)
svm.fit(xij, yij)
self.svmd[f"{i}-{j}"] = svm
else:
raise ValueError("不支持的策略!")
# 多分类决策函数
def decision_function(self, predictions: np.ndarray) -> np.ndarray:
pred_y = np.zeros(predictions.shape[0])
if self.decision_function_shape == 'ovr':
pred_y = predictions.argmax(axis=-1)
elif self.decision_function_shape == 'ovo':
pred_y = predictions.argmax(axis=-1)
return pred_y
def predict(self, X):
n_samples = X.shape[0]
predictions = np.zeros([n_samples, self.n_classes])
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 8 12:20:41 2019
@author: Mike
"""
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import cifar10
from keras.utils import np_utils
from keras import backend as K
# In *older* versions of Tensorflow/Keras you may need to adjust the image
# dimension ordering. Read about channel ordering here:
# https://machinelearningmastery.com/a-gentle-introduction-to-channels-first-and-channels-last-image-formats-for-deep-learning/
# This is the main function. You need to write the getModel and fitModel functions to pass to this.
# Call your functions 'myGetModel' and 'myFitModel'.
# The getModel function should accept an object of the CIFAR class, and return a compiled Keras CNN model.
# In this function you will specify the network structure (including regularization) and the optimizer to
# be used (and its parameters like learning rate), and compile the model (in the Keras sense of running
# model.compile).
# The fitModel function should accect two arguments. The first is the CNN model you return from your getModel
# function, and the second is the CIFAR classed data object. It will return a trained Keras CNN model, which
# will then be applied to the test data. In this function you will train the model, using the Keras model.fit
# function. You will need to specify all parameters of the training algorithm (batch size, etc), and the
# callbacks you will use (EarlyStopping and ModelCheckpoint). You will need to make sure you save and load
# into the model the weight values of its best performing epoch.
def runImageClassification(getModel=None,fitModel=None,seed=7):
# Fetch data. You may need to be connected to the internet the first time this is done.
# After the first time, it should be available in your system. On the off chance this
# is not the case on your system and you find yourself repeatedly downloading the data,
# you should change this code so you can load the data once and pass it to this function.
print("Preparing data...")
data=CIFAR(seed)
# Create model
print("Creating model...")
model=getModel(data)
# Fit model
print("Fitting model...")
model=fitModel(model,data)
# Evaluate on test data
print("Evaluating model...")
score = model.evaluate(data.x_test, data.y_test, verbose=0)
print('Test accuracy:', score[1])
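# A minimal sketch of the two user-supplied functions. Hedged: the layer
# sizes, optimizer settings, batch size and checkpoint filename below are
# illustrative choices, not required values.
#
#   from keras.models import Sequential
#   from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
#   from keras.callbacks import EarlyStopping, ModelCheckpoint
#
#   def myGetModel(data):
#       model = Sequential([
#           Conv2D(32, (3, 3), activation='relu', input_shape=data.input_dim),
#           MaxPooling2D((2, 2)),
#           Flatten(),
#           Dense(128, activation='relu'),
#           Dropout(0.5),
#           Dense(data.num_classes, activation='softmax')])
#       model.compile(optimizer='adam', loss='categorical_crossentropy',
#                     metrics=['accuracy'])
#       return model
#
#   def myFitModel(model, data):
#       cbs = [EarlyStopping(patience=5),
#              ModelCheckpoint('best.h5', save_best_only=True)]
#       model.fit(data.x_train, data.y_train, batch_size=64, epochs=50,
#                 validation_data=(data.x_valid, data.y_valid), callbacks=cbs)
#       model.load_weights('best.h5')
#       return model
#
#   runImageClassification(getModel=myGetModel, fitModel=myFitModel)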
# This is the class that wraps the CIFAR data. You will probably need to be connected to the
# internet the first time you create an object of this class, as the data will be downloaded.
# After that, the data should be stored by Keras and no downloading will be required.
# Important fields that you will need to use are: x_train, y_train, x_valid, y_valid, input_dim and
# num_classes. The first four of these are the training and validation data (split into features and
# target). Note that these have been made ready for use with a Keras network - check out the code
# if you are interested. The last two are the number of input features and the number of target
# classes. These will be needed when defining your CNN.
# The only public method is the showImages function, which you can use to see some labelled images
# from the (validation) data.
# Remember that the x_test and y_test fields will be blank when your functions are run in evaluation -
# so you cannot peek at these cases!
class CIFAR:
def __init__(self,seed=0):
# Get and split data
data = self.__getData(seed)
self.x_train_raw=data[0][0]
self.y_train_raw=data[0][1]
self.x_valid_raw=data[1][0]
self.y_valid_raw=data[1][1]
self.x_test_raw=data[2][0]
self.y_test_raw=data[2][1]
# Record input/output dimensions
self.num_classes=10
self.input_dim=self.x_train_raw.shape[1:]
# Convert data
self.y_train = np_utils.to_categorical(self.y_train_raw, self.num_classes)
self.y_valid = np_utils.to_categorical(self.y_valid_raw, self.num_classes)
self.y_test = np_utils.to_categorical(self.y_test_raw, self.num_classes)
self.x_train = self.x_train_raw.astype('float32')
self.x_valid = self.x_valid_raw.astype('float32')
self.x_test = self.x_test_raw.astype('float32')
self.x_train /= 255
self.x_valid /= 255
self.x_test /= 255
# Class names
self.class_names=['airplane','automobile','bird','cat','deer',
'dog','frog','horse','ship','truck']
    def __getData(self, seed=0):
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
return self.__shuffleData(x_train,y_train,x_test,y_test,seed)
    def __shuffleData(self, x_train, y_train, x_test, y_test, seed=0):
tr_perc=.75
va_perc=.15
x=np.concatenate((x_train,x_test))
y=np.concatenate((y_train,y_test))
np.random.seed(seed)
np.random.shuffle(x)
        np.random.seed(seed)
        np.random.shuffle(y)
        # The source was truncated here; the split below is a reconstruction
        # consistent with tr_perc/va_perc above (75% train, 15% validation,
        # remainder test).
        n_tr = int(tr_perc * x.shape[0])
        n_va = int(va_perc * x.shape[0])
        return ((x[:n_tr], y[:n_tr]),
                (x[n_tr:n_tr + n_va], y[n_tr:n_tr + n_va]),
                (x[n_tr + n_va:], y[n_tr + n_va:]))
#!/usr/bin/env python
import logging
import numpy as np
from scipy import linalg
from timeit import default_timer as timer
from os.path import isfile
from os import remove
from tools import get_input, latex_float
from hamiltonian import main as hamiltonian
from plots import bar_plot, histogram
def readH(format):
"""Read the Hamiltonian using the given format"""
if format == 'npz':
if isfile('hamilt.npz'):
hamilt = np.load('hamilt.npz')
return hamilt['H']
else:
# Fallback to Fortran binary
format = 'fortran_bin'
# print('Hamiltonian file not found. Computing again.')
# b, d, n = get_input()
# return hamiltonian(1, b, d, n)
if format == 'fortran_bin':
_, _, n = get_input()
nn = int(n * (n + 1) / 2)
H = np.empty((nn, nn))
with open('hamilt.bin', 'rb') as h:
for i in range(nn):
H[i] = np.fromfile(h, dtype='float64', count=nn).reshape(nn)
return H
if format == 'text':
H = np.loadtxt("hamilt.out")
return H.T
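# Typical usage (illustrative): ask for the compressed cache first; readH
# falls back to the Fortran binary when 'hamilt.npz' is absent.
# H = readH('npz')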
def get(return_eigv=False, return_ket=False, return_index=False,
return_cmax=False, return_H=False):
"""Return the eigenvalues and optionally the eigenvectors,
the number operator form of the states(ket), the state index of the states,
the max coefficient index and the Hamiltonian"""
# Load files
H = readH('npz') # read the Hamiltonian
    # Save to npz to save space
if not isfile('hamilt.npz'):
np.savez_compressed('hamilt.npz', H=H)
remove('hamilt.bin')
b, d, n = get_input()
n = int(n)
index = np.array([(n1, n2) for n1 in range(n) for n2 in range(n - n1)])
# Get eigenvalues and eigenvectors
if isfile('eigensystem.npz'):
print('Used cached result for: B =', b, 'D =', d, 'N =', n)
eigensystem = np.load('eigensystem.npz')
E = eigensystem['E']
eigenvectors = eigensystem['eigenvectors']
else:
start = timer()
E, eigenvectors = linalg.eigh(H, turbo=True)
end = timer()
print('Diagonalisation for N =', n, ':', end - start, 'seconds')
# Save the results
np.savez_compressed('eigensystem.npz', E=E, eigenvectors=eigenvectors)
eigenvectors = np.transpose(eigenvectors) # each eigenvector is on one row
# max coefficient in eigenvector
c_max = np.empty(eigenvectors.shape[0], dtype=int)
# The index of the largest coefficient
for i in range(eigenvectors.shape[0]):
c_max[i] = np.argmax(np.abs(eigenvectors[i]))
results = (E, )
if return_eigv:
results += (eigenvectors, )
if return_ket:
results += (index[c_max], )
if return_index:
results += (index, )
if return_cmax:
results += (c_max, )
if return_H:
results += (H, )
return results
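# Example (illustrative): the return_* flags append extra arrays to the
# returned tuple in a fixed order (eigenvectors, ket, index, c_max, H), so
# unpack in that order:
# E, ket, c_max = get(return_ket=True, return_cmax=True)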
def levels(E, ket, epsilon=1e-8, colors=''):
"""Return the degenerate subspace index and optionally the colormap"""
# irreducible representations
# 0 - unidimensional symmetric representation (reuns)
# 1 - unidimensional anti-symmetric representation (reuna)
# 2 - bidimensional representation (rebde)
ir_reps = np.zeros([E.size], dtype=np.uint8)
return_colors = len(colors)
if return_colors:
colormap = [''] * E.size # colors used
# Group energy levels such that a level contains all the eigenvalues with
# the same value
delta = np.diff(E)
avgSpacing = (E[-1] - E[0]) / E.size
relsp = delta / avgSpacing
print('levels epsilon:', epsilon)
print('avgSpacing:', avgSpacing)
levels = np.split(E, np.where(relsp > epsilon)[0] + 1)
states = np.split(ket, np.where(relsp > epsilon)[0] + 1)
# Energy difference (between two consecutive levels) histogram
histogram(delta, xlabel=r'$\Delta E$', xscale='log',
bins=np.pad(np.logspace(-15, 1, 17), (1, 0),
mode='constant'), ylabel='No. of levels',
fname='hist_delta.pdf', figsize=(5.8, 3))
# Relative spacing histogram
histogram(relsp, xscale='log', ylabel='No. of levels',
bins=np.pad(np.logspace(-13, 1, 15), (1, 0),
mode='constant'), fname='hist_relsp.pdf',
xlabel='$s$', figsize=(2.8, 3))
# Energy difference bar plot
bar_plot(delta, figsize=(5.8, 3), ylabel=r'$\Delta E$', yscale='log',
xlabel='index', fname='bar_delta.pdf', dpi=720)
# Relative spacing bar plot
bar_plot(relsp, figsize=(5.8, 3), yscale='log', fname='relsp.pdf', dpi=720,
label=r'$\varepsilon=' + latex_float(epsilon) + '$',
axhline_y=epsilon, ylabel='$s$', xlabel='index')
# Check for bidimensional representation selection problems
levels_cp = list(levels)
states_cp = list(states)
log = open('log.txt', 'a')
log.write('\n\nlevels epsilon: ' + str(epsilon))
for i in range(len(levels_cp)):
if levels_cp[i].size > 2:
local_relsp = np.diff(levels_cp[i]) / avgSpacing
log.write('\nInfo: Found ' + str(levels_cp[i].size) + ' levels ' +
'in the bidimensional representation with: \nenergy: ' +
str(levels_cp[i]) + '\ndelta: ' +
                      str(np.diff(levels_cp[i])))
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import division
import unittest
import math
import numpy as np
from singa import singa_wrap as singa_api
from singa import tensor
from cuda_helper import gpu_dev, cpu_dev
def _np_bn_training(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
channel = x.shape[1]
np.testing.assert_array_almost_equal(scale.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
batch_m = x.mean(axis=(0, 2, 3), keepdims=True)
batch_v = x.var(axis=(0, 2, 3), keepdims=True)
x_norm = (x - batch_m) / np.sqrt(batch_v + e)
y_norm = x_norm * scale + bias
# https://arxiv.org/pdf/1502.03167.pdf
s = list(x.shape)
s[1] = 1
batch_v_unbiased = np.prod(s) * batch_v / (np.prod(s) - 1)
rm = momentum * batch_m + (1 - momentum) * rm
rv = momentum * batch_v_unbiased + (1 - momentum) * rv
# https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnBatchNormalizationForwardTraining
resultSaveInvVariance = 1 / np.sqrt(batch_v)
return y_norm, rm, rv, batch_m, resultSaveInvVariance
def _np_bn_testing(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
channel = x.shape[1]
np.testing.assert_array_almost_equal(scale.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
return scale * (x - rm) / np.sqrt(rv + e) + bias
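# Quick numpy-only sanity check (illustrative): with momentum=1.0 the running
# statistics returned by _np_bn_training equal the batch statistics, so
# inference on the same batch reproduces the training output up to the
# biased/unbiased variance correction:
# _x = np.random.random((4, 2, 3, 3)).astype(np.float32)
# _s = np.ones((1, 2, 1, 1), dtype=np.float32)
# _b = np.zeros((1, 2, 1, 1), dtype=np.float32)
# _y, _rm, _rv, _, _ = _np_bn_training(_x, _s, _b, _b, _s, momentum=1.0)
# np.testing.assert_allclose(_np_bn_testing(_x, _s, _b, _rm, _rv), _y, rtol=5e-2)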
def _cTensor_to_pyTensor(cTensor):
new_t = tensor.Tensor()
new_t.data = cTensor
new_t.shape = tuple(new_t.data.shape())
new_t.device = new_t.data.device()
new_t.dtype = new_t.data.data_type()
return new_t
def _ctensor_eq_ndarray(t1, np1):
d = t1.device()
t1.ToHost()
if t1.data_type() == singa_api.kInt:
np.testing.assert_array_almost_equal(t1.GetIntValue(t1.Size()),
np1.flatten())
elif t1.data_type() == singa_api.kFloat32:
np.testing.assert_array_almost_equal(t1.GetFloatValue(t1.Size()),
np1.flatten())
if np1.dtype == np.float32:
np.testing.assert_equal(t1.data_type(), singa_api.kFloat32)
elif np1.dtype == np.int32:
np.testing.assert_equal(t1.data_type(), singa_api.kInt)
np.testing.assert_array_almost_equal(t1.shape(), np1.shape)
t1.ToDevice(d)
def print_t(t1):
d = t1.device()
t1.ToHost()
if t1.data_type() == singa_api.kInt:
print(t1.GetIntValue(t1.Size()))
elif t1.data_type() == singa_api.kFloat32:
print(t1.GetFloatValue(t1.Size()))
t1.ToDevice(d)
class TestAPI(unittest.TestCase):
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_batchnorm_training_gpu(self):
dev = gpu_dev
def _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
# np api
(y_1, rm_1, rv_1, bm_1, bv_1) = _np_bn_training(x_0,
s_0,
b_0,
rm_0,
rv_0,
momentum=m_0)
# singa api
rm_t = tensor.Tensor(device=dev, data=rm_0)
rv_t = tensor.Tensor(device=dev, data=rv_0)
hndl = singa_api.CudnnBatchNormHandle(
m_0,
tensor.Tensor(device=dev, data=x_0).data)
(y_2_c, bm_2_c, bv_2_c) = singa_api.GpuBatchNormForwardTraining(
hndl,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=s_0).data,
tensor.Tensor(device=dev, data=b_0).data, rm_t.data, rv_t.data)
np.testing.assert_array_almost_equal(
y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=4)
np.testing.assert_array_almost_equal(
bm_1, tensor.to_numpy(_cTensor_to_pyTensor(bm_2_c)))
np.testing.assert_array_almost_equal(rm_1, tensor.to_numpy(rm_t))
#print(bv_1)
#print(tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)))
np.testing.assert_array_almost_equal(
bv_1, tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)), decimal=3)
np.testing.assert_array_almost_equal(rv_1,
tensor.to_numpy(rv_t),
decimal=4)
return
x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
dtype=np.float32).reshape((2, 2, 2, 2))
s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.0)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
c = 10
x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_batchnorm_testing_gpu(self):
dev = gpu_dev
def _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
# np api
y_1 = _np_bn_testing(x_0, s_0, b_0, rm_0, rv_0, momentum=m_0)
# singa api
hndl = singa_api.CudnnBatchNormHandle(
m_0,
tensor.Tensor(device=dev, data=x_0).data)
y_2_c = singa_api.GpuBatchNormForwardInference(
hndl,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=s_0).data,
tensor.Tensor(device=dev, data=b_0).data,
tensor.Tensor(device=dev, data=rm_0).data,
tensor.Tensor(device=dev, data=rv_0).data)
#print(y_1)
#print(tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)))
np.testing.assert_array_almost_equal(
y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=3)
return
x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
dtype=np.float32).reshape((2, 2, 2, 2))
s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
_run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
c = 10
x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
_run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
def _softmax_api_helper(self, dev):
def _run_test(dev, org_shape, axis, aft_shape):
x_0 = np.random.random(org_shape).astype(np.float32)
x_0 = x_0 + 1000
x0 = tensor.Tensor(device=dev, data=x_0)
# test with axis
y0 = tensor._call_singa_func(singa_api.SoftMax, x0.data, axis)
# test with numpy
x_0 = x_0.reshape(aft_shape)
x_0 = x_0 - np.max(x_0)
y1 = np.divide(np.exp(x_0),
np.sum(np.exp(x_0), axis=1).reshape(x_0.shape[0],
1)) # 2d softmax
y1 = y1.reshape(org_shape)
np.testing.assert_array_almost_equal(tensor.to_numpy(y0), y1)
_run_test(dev, [2, 2], 1, [2, 2])
_run_test(dev, [2, 2], 0, [1, 4])
_run_test(dev, [2, 2], -1, [2, 2])
_run_test(dev, [2, 2], -2, [1, 4])
_run_test(dev, [2, 2, 2], 2, [4, 2])
_run_test(dev, [2, 2, 2], 1, [2, 4])
_run_test(dev, [2, 2, 2], 0, [1, 8])
_run_test(dev, [2, 2, 2], -1, [4, 2])
_run_test(dev, [2, 2, 2], -2, [2, 4])
_run_test(dev, [2, 2, 2], -3, [1, 8])
_run_test(dev, [2, 2, 2, 2], 3, [8, 2])
_run_test(dev, [2, 2, 2, 2], 2, [4, 4])
_run_test(dev, [2, 2, 2, 2], 1, [2, 8])
_run_test(dev, [2, 2, 2, 2], 0, [1, 16])
_run_test(dev, [2, 2, 2, 2], -1, [8, 2])
_run_test(dev, [2, 2, 2, 2], -2, [4, 4])
_run_test(dev, [2, 2, 2, 2], -3, [2, 8])
_run_test(dev, [2, 2, 2, 2], -4, [1, 16])
def test_softmax_api_cpu(self):
self._softmax_api_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_softmax_api_gpu(self):
self._softmax_api_helper(gpu_dev)
def _tensor_arithmetic_op_broadcast_helper(self, dev):
def _run_test(dev, singa_op, np_op, s1, s2):
x_0 = np.random.random(s1).astype(np.float32)
y_0 = np.random.random(s2).astype(np.float32)
x0 = tensor.Tensor(device=dev, data=x_0)
y0 = tensor.Tensor(device=dev, data=y_0)
z0 = tensor._call_singa_func(singa_op, x0.data, y0.data)
z0.to_host()
np.testing.assert_array_almost_equal(tensor.to_numpy(z0),
np_op(x_0, y_0))
return
for s_op, n_op in zip([
singa_api.Pow,
singa_api.__add__,
singa_api.__div__,
singa_api.__sub__,
singa_api.__mul__,
], [np.power, np.add, np.divide, np.subtract, np.multiply]):
_run_test(dev, s_op, n_op, [6], [1])
_run_test(dev, s_op, n_op, [2, 3], [2, 3])
_run_test(dev, s_op, n_op, [3, 2], [1])
_run_test(dev, s_op, n_op, [3, 1, 2], [3, 1, 1])
_run_test(dev, s_op, n_op, [2, 3, 4, 5], [5])
_run_test(dev, s_op, n_op, [2, 3, 4, 5], [1, 1, 1])
_run_test(dev, s_op, n_op, [2, 3, 4, 5], [1, 1, 1, 1])
_run_test(dev, s_op, n_op, [2, 3, 4, 5], [4, 5]) # 45+2345=2345
_run_test(dev, s_op, n_op, [3, 1, 2, 1], [3, 1, 2])
_run_test(dev, s_op, n_op, [4, 5], [2, 3, 4, 5]) # 45+2345=2345
_run_test(dev, s_op, n_op, [1, 4, 5], [2, 3, 1, 1]) # 145+2311=2345
_run_test(dev, s_op, n_op, [3, 4, 5], [2, 1, 1, 1]) # 345+2111=2345
def test_tensor_arithmetic_op_broadcast_cpu(self):
self._tensor_arithmetic_op_broadcast_helper(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_tensor_arithmetic_op_broadcast_gpu(self):
self._tensor_arithmetic_op_broadcast_helper(gpu_dev)
def _transpose_and_arithmetic_op_broadcast_helper(self, dev):
def _test(s1, s2, axis1, axis2, s3, s_op, n_op, dev):
x_0 = np.random.random(s1).astype(np.float32)
y_0 = np.random.random(s2).astype(np.float32)
x0 = tensor.Tensor(device=dev, data=x_0)
y0 = tensor.Tensor(device=dev, data=y_0)
x1 = x0.transpose(axis1)
y1 = y0.transpose(axis2)
z0 = tensor._call_singa_func(s_op, x1.data, y1.data)
z0.to_host()
np.testing.assert_array_almost_equal(
tensor.to_numpy(z0),
n_op(x_0.transpose(axis1), y_0.transpose(axis2)))
np.testing.assert_array_almost_equal(z0.shape, s3)
return
for s_op, n_op in zip([
singa_api.Pow,
singa_api.__add__,
singa_api.__div__,
singa_api.__sub__,
singa_api.__mul__,
], [np.power, np.add, np.divide, np.subtract, np.multiply]):
s1 = [1, 5, 1, 3]
s2 = [3, 1, 1, 4]
axis1 = [3, 2, 1, 0] # 3121
axis2 = [1, 0, 2, 3] # 1314
s3 = [3, 3, 5, 4]
_test(s1, s2, axis1, axis2, s3, s_op, n_op, dev)
s1 = [1, 5, 1]
s2 = [1, 3, 2]
axis1 = [2, 1, 0] # 151
axis2 = [1, 0, 2] # 312
s3 = [3, 5, 2]
_test(s1, s2, axis1, axis2, s3, s_op, n_op, dev)
s1 = [5, 1]
s2 = [1, 3]
axis1 = [1, 0] # 15
axis2 = [1, 0] # 31
s3 = [3, 5]
_test(s1, s2, axis1, axis2, s3, s_op, n_op, dev)
def test_transpose_and_arithmetic_op_broadcast_cpu(self):
self._transpose_and_arithmetic_op_broadcast_helper(cpu_dev)
def _erf(self, dev=cpu_dev):
np1 = np.random.random((2, 3)).astype(np.float32)
x1 = tensor.from_numpy(np1)
x1.to_device(dev)
y1 = tensor.from_raw_tensor(singa_api.Erf(x1.data))
# from scipy.special import erf
# np.testing.assert_array_almost_equal(erf(np1), tensor.to_numpy(y1))
def test_erf_cpu(self):
self._erf(cpu_dev)
@unittest.skipIf(not singa_api.USE_CUDA, 'CUDA is not enabled')
def test_transpose_and_arithmetic_op_broadcast_gpu(self):
self._transpose_and_arithmetic_op_broadcast_helper(gpu_dev)
def test_batchnorm_training_dnnl(self):
dev = cpu_dev
def _np_bn_training(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
channel = x.shape[1]
np.testing.assert_array_almost_equal(scale.shape,
(1, channel, 1, 1))
np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
batch_m = x.mean(axis=(0, 2, 3), keepdims=True)
batch_v = x.var(axis=(0, 2, 3), keepdims=True)
x_norm = (x - batch_m) / np.sqrt(batch_v + e)
y_norm = x_norm * scale + bias
# https://arxiv.org/pdf/1502.03167.pdf
s = list(x.shape)
s[1] = 1
batch_v_unbiased = np.prod(s) * batch_v / (np.prod(s) - 1)
rm = momentum * batch_m + (1 - momentum) * rm
rv = momentum * batch_v_unbiased + (1 - momentum) * rv
# https://docs.nvidia.com/deeplearning/sdk/cudnn-developer-guide/index.html#cudnnBatchNormalizationForwardTraining
# this value is useful for bwd computation
resultSaveInvVariance = 1 / np.sqrt(batch_v)
return y_norm, rm, rv, batch_m, resultSaveInvVariance
def _run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
# np api
(y_1, rm_1, rv_1, bm_1, bv_1) = _np_bn_training(x_0,
s_0,
b_0,
rm_0,
rv_0,
momentum=m_0)
# singa api
hndl = singa_api.BatchNormHandle(
m_0,
tensor.Tensor(device=dev, data=x_0).data)
(y_2_c, bm_2_c, bv_2_c) = singa_api.CpuBatchNormForwardTraining(
hndl,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=s_0).data,
tensor.Tensor(device=dev, data=b_0).data,
tensor.Tensor(device=dev, data=rm_0).data,
tensor.Tensor(device=dev, data=rv_0).data)
np.testing.assert_array_almost_equal(
y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
np.testing.assert_array_almost_equal(
bm_1, tensor.to_numpy(_cTensor_to_pyTensor(bm_2_c)), decimal=5)
#print(bv_1)
#print(tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)))
#np.testing.assert_array_almost_equal(
# bv_1, tensor.to_numpy(_cTensor_to_pyTensor(bv_2_c)), decimal=3)
return
x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
dtype=np.float32).reshape((2, 2, 2, 2))
s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.0)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
c = 10
x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
_run_training(x_0, s_0, b_0, rm_0, rv_0, m_0=0.2)
def test_batchnorm_testing_dnnl(self):
dev = cpu_dev
def _np_bn_testing(x, scale, bias, rm, rv, momentum=0.1, e=1e-5):
channel = x.shape[1]
np.testing.assert_array_almost_equal(scale.shape,
(1, channel, 1, 1))
np.testing.assert_array_almost_equal(bias.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rm.shape, (1, channel, 1, 1))
np.testing.assert_array_almost_equal(rv.shape, (1, channel, 1, 1))
return scale * (x - rm) / np.sqrt(rv + e) + bias
def _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=0.1):
# np api
y_1 = _np_bn_testing(x_0, s_0, b_0, rm_0, rv_0, momentum=m_0)
# singa api
hndl = singa_api.BatchNormHandle(
m_0,
tensor.Tensor(device=dev, data=x_0).data)
y_2_c = singa_api.CpuBatchNormForwardInference(
hndl,
tensor.Tensor(device=dev, data=x_0).data,
tensor.Tensor(device=dev, data=s_0).data,
tensor.Tensor(device=dev, data=b_0).data,
tensor.Tensor(device=dev, data=rm_0).data,
tensor.Tensor(device=dev, data=rv_0).data)
#print(y_1)
#print(tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)))
np.testing.assert_array_almost_equal(
y_1, tensor.to_numpy(_cTensor_to_pyTensor(y_2_c)), decimal=5)
return
x_0 = np.array([1, 1, 1, 1, 2, 2, 2, 2, 10, 10, 10, 10, 20, 20, 20, 20],
dtype=np.float32).reshape((2, 2, 2, 2))
s_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
b_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rm_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
rv_0 = np.array([1, 10], dtype=np.float32).reshape((1, 2, 1, 1))
_run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
c = 10
x_0 = np.random.random((10, c, 20, 20)).astype(np.float32)
s_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        b_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        # The source was truncated here; completed to mirror
        # test_batchnorm_testing_gpu above.
        rm_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        rv_0 = np.random.random((1, c, 1, 1)).astype(np.float32)
        _run_testing(x_0, s_0, b_0, rm_0, rv_0, m_0=1.0)
"""
It contains the functions to compute the cases that present analytical
solutions.
All functions output the analytical solution in kcal/mol
"""
import numpy
from numpy import pi
from scipy import special, linalg
from scipy.special import factorial  # scipy.misc.factorial was removed in newer SciPy
from math import gamma
def an_spherical(q, xq, E_1, E_2, E_0, R, N):
"""
It computes the analytical solution of the potential of a sphere with
Nq charges inside.
    Taken from Kirkwood (1934).
Arguments
----------
q : array, charges.
xq : array, positions of the charges.
E_1: float, dielectric constant inside the sphere.
E_2: float, dielectric constant outside the sphere.
E_0: float, dielectric constant of vacuum.
R : float, radius of the sphere.
N : int, number of terms desired in the spherical harmonic expansion.
Returns
--------
PHI: array, reaction potential.
"""
PHI = numpy.zeros(len(q))
for K in range(len(q)):
rho = numpy.sqrt(numpy.sum(xq[K]**2))
zenit = numpy.arccos(xq[K, 2] / rho)
azim = numpy.arctan2(xq[K, 1], xq[K, 0])
phi = 0. + 0. * 1j
for n in range(N):
for m in range(-n, n + 1):
sph1 = special.sph_harm(m, n, zenit, azim)
cons1 = rho**n / (E_1 * E_0 * R**(2 * n + 1)) * (E_1 - E_2) * (
n + 1) / (E_1 * n + E_2 * (n + 1))
cons2 = 4 * pi / (2 * n + 1)
for k in range(len(q)):
rho_k = numpy.sqrt(numpy.sum(xq[k]**2))
zenit_k = numpy.arccos(xq[k, 2] / rho_k)
azim_k = numpy.arctan2(xq[k, 1], xq[k, 0])
sph2 = numpy.conj(special.sph_harm(m, n, zenit_k, azim_k))
phi += cons1 * cons2 * q[K] * rho_k**n * sph1 * sph2
PHI[K] = numpy.real(phi) / (4 * pi)
return PHI
def get_K(x, n):
"""
    It computes the polynomials K needed for the Kirkwood (1934) solutions.
K_n(x) in Equation 4 in Kirkwood 1934.
Arguments
----------
x: float, evaluation point of K.
n: int, number of terms desired in the expansion.
Returns
--------
    K: float, value of the polynomial K_n evaluated at x.
"""
K = 0.
n_fact = factorial(n)
n_fact2 = factorial(2 * n)
for s in range(n + 1):
K += 2**s * n_fact * factorial(2 * n - s) / (factorial(s) * n_fact2 *
factorial(n - s)) * x**s
return K
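# Sanity check (illustrative): the lowest-order Kirkwood polynomials are
# K_0(x) = 1 and K_1(x) = 1 + x, which get_K reproduces:
# assert get_K(0.3, 0) == 1.0 and abs(get_K(0.3, 1) - 1.3) < 1e-12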
def an_P(q, xq, E_1, E_2, R, kappa, a, N):
"""
It computes the solvation energy according to Kirkwood-1934.
Arguments
----------
q : array, charges.
xq : array, positions of the charges.
E_1 : float, dielectric constant inside the sphere.
E_2 : float, dielectric constant outside the sphere.
R : float, radius of the sphere.
kappa: float, reciprocal of Debye length.
a : float, radius of the Stern Layer.
    N    : int, number of terms desired in the polynomial expansion.
Returns
--------
E_P : float, solvation energy.
"""
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
PHI = numpy.zeros(len(q))
for K in range(len(q)):
rho = numpy.sqrt(numpy.sum(xq[K]**2))
zenit = numpy.arccos(xq[K, 2] / rho)
azim = numpy.arctan2(xq[K, 1], xq[K, 0])
phi = 0. + 0. * 1j
for n in range(N):
for m in range(-n, n + 1):
P1 = special.lpmv(numpy.abs(m), n, numpy.cos(zenit))
Enm = 0.
for k in range(len(q)):
rho_k = numpy.sqrt(numpy.sum(xq[k]**2))
zenit_k = numpy.arccos(xq[k, 2] / rho_k)
azim_k = numpy.arctan2(xq[k, 1], xq[k, 0])
P2 = special.lpmv(numpy.abs(m), n, numpy.cos(zenit_k))
Enm += q[k] * rho_k**n * factorial(n - numpy.abs(
m)) / factorial(n + numpy.abs(m)) * P2 * numpy.exp(
-1j * m * azim_k)
C2 = (kappa * a)**2 * get_K(kappa * a, n - 1) / (
get_K(kappa * a, n + 1) + n * (E_2 - E_1) / (
(n + 1) * E_2 + n * E_1) * (R / a)**(2 * n + 1) *
(kappa * a)**2 * get_K(kappa * a, n - 1) / ((2 * n - 1) *
(2 * n + 1)))
C1 = Enm / (E_2 * E_0 * a**
(2 * n + 1)) * (2 * n + 1) / (2 * n - 1) * (E_2 / (
(n + 1) * E_2 + n * E_1))**2
if n == 0 and m == 0:
Bnm = Enm / (E_0 * R) * (
1 / E_2 - 1 / E_1) - Enm * kappa * a / (
E_0 * E_2 * a * (1 + kappa * a))
else:
Bnm = 1. / (E_1 * E_0 * R**(2 * n + 1)) * (E_1 - E_2) * (
n + 1) / (E_1 * n + E_2 * (n + 1)) * Enm - C1 * C2
phi += Bnm * rho**n * P1 * numpy.exp(1j * m * azim)
PHI[K] = numpy.real(phi) / (4 * pi)
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J)
E_P = 0.5 * C0 * numpy.sum(q * PHI)
return E_P
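# Example call (illustrative values): one unit charge just off the centre of
# a 4 Angstrom sphere with a Stern layer at 5 Angstrom, E_1 = 4 inside,
# E_2 = 80 outside, and kappa = 0.125 1/Angstrom:
# E_solv = an_P(numpy.array([1.]), numpy.array([[1e-12, 0., 0.]]),
#               4., 80., 4., 0.125, 5., 20)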
def two_sphere(a, R, kappa, E_1, E_2, q):
"""
It computes the analytical solution of a spherical surface and a spherical
    molecule with a center charge, both of radius a.
Follows Cooper&Barba 2016
Arguments
----------
    a    : float, radius of the surface and the molecule.
    R    : float, center to center distance.
kappa: float, reciprocal of Debye length.
E_1 : float, dielectric constant inside the sphere.
E_2 : float, dielectric constant outside the sphere.
    q    : float, number of qe to be assigned to the charge.
Returns
--------
Einter : float, interaction energy.
E1sphere: float, solvation energy of one sphere.
E2sphere: float, solvation energy of two spheres together.
Note:
Einter should match (E2sphere - 2xE1sphere)
"""
N = 20 # Number of terms in expansion.
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * a)
K1p = index / (kappa * a) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * a) * numpy.sqrt(pi / (2 * kappa * a))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * a)**(3 / 2.)) * special.kv(
index, kappa * a) + numpy.sqrt(pi / (2 * kappa * a)) * K1p
I1 = special.iv(index2, kappa * a)
I1p = index / (kappa * a) * I1[0:-1] + I1[1:]
i1 = special.iv(index, kappa * a) * numpy.sqrt(pi / (2 * kappa * a))
i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * a)**(3 / 2.)) * special.iv(
index, kappa * a) + numpy.sqrt(pi / (2 * kappa * a)) * I1p
B = numpy.zeros((N, N), dtype=float)
for n in range(N):
for m in range(N):
for nu in range(N):
if n >= nu and m >= nu:
g1 = gamma(n - nu + 0.5)
g2 = gamma(m - nu + 0.5)
g3 = gamma(nu + 0.5)
g4 = gamma(m + n - nu + 1.5)
f1 = factorial(n + m - nu)
f2 = factorial(n - nu)
f3 = factorial(m - nu)
f4 = factorial(nu)
Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
pi * g4 * f2 * f3 * f4)
kB = special.kv(n + m - 2 * nu + 0.5, kappa *
R) * numpy.sqrt(pi / (2 * kappa * R))
B[n, m] += Anm * kB
M = numpy.zeros((N, N), float)
E_hat = E_1 / E_2
for i in range(N):
for j in range(N):
M[i, j] = (2 * i + 1) * B[i, j] * (
kappa * i1p[i] - E_hat * i * i1[i] / a)
if i == j:
M[i, j] += kappa * k1p[i] - E_hat * i * k1[i] / a
RHS = numpy.zeros(N)
RHS[0] = -E_hat * q / (4 * pi * E_1 * a * a)
a_coeff = linalg.solve(M, RHS)
a0 = a_coeff[0]
a0_inf = -E_hat * q / (4 * pi * E_1 * a * a) * 1 / (kappa * k1p[0])
phi_2 = a0 * k1[0] + i1[0] * numpy.sum(a_coeff * B[:, 0]) - q / (4 * pi *
E_1 * a)
phi_1 = a0_inf * k1[0] - q / (4 * pi * E_1 * a)
phi_inter = phi_2 - phi_1
CC0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
Einter = 0.5 * CC0 * q * phi_inter
E1sphere = 0.5 * CC0 * q * phi_1
E2sphere = 0.5 * CC0 * q * phi_2
return Einter, E1sphere, E2sphere
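# Example call (illustrative values): two 4 Angstrom spheres whose centers
# are 12 Angstrom apart; per the note above, Einter should match
# E2sphere - 2*E1sphere.
# Einter, E1sphere, E2sphere = two_sphere(4., 12., 0.125, 4., 80., 1.)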
def constant_potential_single_point(phi0, a, r, kappa):
"""
It computes the potential in a point 'r' due to a spherical surface
with constant potential phi0, immersed in water. Solution to the
Poisson-Boltzmann problem.
Arguments
----------
phi0 : float, constant potential on the surface of the sphere.
a : float, radius of the sphere.
r : float, distance from the center of the sphere to the evaluation
point.
kappa: float, reciprocal of Debye length.
Returns
--------
phi : float, potential.
"""
phi = a / r * phi0 * numpy.exp(kappa * (a - r))
return phi
def constant_charge_single_point(sigma0, a, r, kappa, epsilon):
"""
It computes the potential in a point 'r' due to a spherical surface
with constant charge sigma0 immersed in water. Solution to the
    Poisson-Boltzmann problem.
Arguments
----------
sigma0 : float, constant charge on the surface of the sphere.
a : float, radius of the sphere.
r : float, distance from the center of the sphere to the evaluation
point.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
phi : float, potential.
"""
dphi0 = -sigma0 / epsilon
phi = -dphi0 * a * a / (1 + kappa * a) * numpy.exp(kappa * (a - r)) / r
return phi
def constant_potential_single_charge(phi0, radius, kappa, epsilon):
"""
It computes the surface charge of a sphere at constant potential, immersed
in water.
Arguments
----------
phi0 : float, constant potential on the surface of the sphere.
radius : float, radius of the sphere.
kappa : float, reciprocal of Debye length.
    epsilon: float, water dielectric constant.
Returns
--------
sigma : float, surface charge.
"""
dphi = -phi0 * ((1. + kappa * radius) / radius)
sigma = -epsilon * dphi # Surface charge
return sigma
def constant_charge_single_potential(sigma0, radius, kappa, epsilon):
"""
    It computes the surface potential on a sphere at constant charge, immersed
in water.
Arguments
----------
sigma0 : float, constant charge on the surface of the sphere.
radius : float, radius of the sphere.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
phi : float, potential.
"""
dphi = -sigma0 / epsilon
phi = -dphi * radius / (1. + kappa * radius) # Surface potential
return phi
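# The two conversions above are exact inverses of each other (illustrative
# check):
# s = constant_potential_single_charge(1., 4., 0.125, 80.)
# assert abs(constant_charge_single_potential(s, 4., 0.125, 80.) - 1.) < 1e-12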
def constant_potential_twosphere(phi01, phi02, r1, r2, R, kappa, epsilon):
"""
It computes the solvation energy of two spheres at constant potential,
immersed in water.
Arguments
----------
phi01 : float, constant potential on the surface of the sphere 1.
phi02 : float, constant potential on the surface of the sphere 2.
r1 : float, radius of sphere 1.
r2 : float, radius of sphere 2.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
E_solv : float, solvation energy.
"""
kT = 4.1419464e-21 # at 300K
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
C0 = kT / qe
phi01 /= C0
phi02 /= C0
k1 = special.kv(0.5, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k2 = special.kv(0.5, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
B00 = special.kv(0.5, kappa * R) * numpy.sqrt(pi / (2 * kappa * R))
# k1 = special.kv(0.5,kappa*r1)*numpy.sqrt(2/(pi*kappa*r1))
# k2 = special.kv(0.5,kappa*r2)*numpy.sqrt(2/(pi*kappa*r2))
# B00 = special.kv(0.5,kappa*R)*numpy.sqrt(2/(pi*kappa*R))
i1 = special.iv(0.5, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
i2 = special.iv(0.5, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
a0 = (phi02 * B00 * i1 - phi01 * k2) / (B00 * B00 * i2 * i1 - k1 * k2)
b0 = (phi02 * k1 - phi01 * B00 * i2) / (k2 * k1 - B00 * B00 * i1 * i2)
U1 = 2 * pi * phi01 * (phi01 * numpy.exp(kappa * r1) * (kappa * r1) *
(kappa * r1) / numpy.sinh(kappa * r1) - pi * a0 /
(2 * i1))
U2 = 2 * pi * phi02 * (phi02 * numpy.exp(kappa * r2) * (kappa * r2) *
(kappa * r2) / numpy.sinh(kappa * r2) - pi * b0 /
(2 * i2))
print('U1: {}'.format(U1))
print('U2: {}'.format(U2))
print('E: {}'.format(U1 + U2))
C1 = C0 * C0 * epsilon / kappa
u1 = U1 * C1
u2 = U2 * C1
CC0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
E_solv = CC0 * (u1 + u2)
return E_solv
def constant_potential_twosphere_2(phi01, phi02, r1, r2, R, kappa, epsilon):
"""
It computes the solvation energy of two spheres at constant potential,
immersed in water.
Arguments
----------
phi01 : float, constant potential on the surface of the sphere 1.
phi02 : float, constant potential on the surface of the sphere 2.
r1 : float, radius of sphere 1.
r2 : float, radius of sphere 2.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
E_solv : float, solvation energy.
"""
kT = 4.1419464e-21 # at 300K
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
h = R - r1 - r2
# E_inter = r1*r2*epsilon/(4*R) * ( (phi01+phi02)**2 * log(1+numpy.exp(-kappa*h)) + (phi01-phi02)**2*log(1-numpy.exp(-kappa*h)) )
# E_inter = epsilon*r1*phi01**2/2 * log(1+numpy.exp(-kappa*h))
    E_solv = epsilon * r1 * r2 * (phi01**2 + phi02**2) / (4 * (r1 + r2)) * (
        (2 * phi01 * phi02) / (phi01**2 + phi02**2) * numpy.log(
            (1 + numpy.exp(-kappa * h)) /
            (1 - numpy.exp(-kappa * h))) + numpy.log(1 - numpy.exp(-2 * kappa * h)))
CC0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
E_solv *= CC0
return E_solv
def constant_potential_single_energy(phi0, r1, kappa, epsilon):
"""
It computes the total energy of a single sphere at constant potential,
    immersed in water.
Arguments
----------
phi0 : float, constant potential on the surface of the sphere.
r1 : float, radius of sphere.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
E : float, total energy.
"""
N = 1 # Number of terms in expansion
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * r1)
K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
a0_inf = phi0 / k1[0]
U1_inf = a0_inf * k1p[0]
C1 = 2 * pi * kappa * phi0 * r1 * r1 * epsilon
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
E = C0 * C1 * U1_inf
return E
def constant_charge_single_energy(sigma0, r1, kappa, epsilon):
"""
It computes the total energy of a single sphere at constant charge,
    immersed in water.
Arguments
----------
sigma0 : float, constant charge on the surface of the sphere.
r1 : float, radius of sphere.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
E : float, total energy.
"""
N = 20 # Number of terms in expansion
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * r1)
K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
a0_inf = -sigma0 / (epsilon * kappa * k1p[0])
U1_inf = a0_inf * k1[0]
C1 = 2 * pi * sigma0 * r1 * r1
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
E = C0 * C1 * U1_inf
return E
def constant_potential_twosphere_dissimilar(phi01, phi02, r1, r2, R, kappa,
epsilon):
"""
It computes the interaction energy for dissimilar spheres at constant
potential, immersed in water.
Arguments
----------
phi01 : float, constant potential on the surface of the sphere 1.
phi02 : float, constant potential on the surface of the sphere 2.
r1 : float, radius of sphere 1.
r2 : float, radius of sphere 2.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
E_inter: float, interaction energy.
"""
N = 20 # Number of terms in expansion
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * r1)
K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
K2 = special.kv(index2, kappa * r2)
K2p = index / (kappa * r2) * K2[0:-1] - K2[1:]
k2 = special.kv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
k2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.kv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * K2p
I1 = special.iv(index2, kappa * r1)
I1p = index / (kappa * r1) * I1[0:-1] + I1[1:]
i1 = special.iv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.iv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * I1p
I2 = special.iv(index2, kappa * r2)
I2p = index / (kappa * r2) * I2[0:-1] + I2[1:]
i2 = special.iv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
i2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.iv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * I2p
B = numpy.zeros((N, N), dtype=float)
for n in range(N):
for m in range(N):
for nu in range(N):
if n >= nu and m >= nu:
g1 = gamma(n - nu + 0.5)
g2 = gamma(m - nu + 0.5)
g3 = gamma(nu + 0.5)
g4 = gamma(m + n - nu + 1.5)
f1 = factorial(n + m - nu)
f2 = factorial(n - nu)
f3 = factorial(m - nu)
f4 = factorial(nu)
Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
pi * g4 * f2 * f3 * f4)
kB = special.kv(n + m - 2 * nu + 0.5, kappa *
R) * numpy.sqrt(pi / (2 * kappa * R))
B[n, m] += Anm * kB
M = numpy.zeros((2 * N, 2 * N), float)
for j in range(N):
for n in range(N):
M[j, n + N] = (2 * j + 1) * B[j, n] * i1[j] / k2[n]
M[j + N, n] = (2 * j + 1) * B[j, n] * i2[j] / k1[n]
if n == j:
M[j, n] = 1
M[j + N, n + N] = 1
RHS = numpy.zeros(2 * N)
RHS[0] = phi01
RHS[N] = phi02
coeff = linalg.solve(M, RHS)
a = coeff[0:N] / k1
b = coeff[N:2 * N] / k2
a0 = a[0]
a0_inf = phi01 / k1[0]
b0 = b[0]
b0_inf = phi02 / k2[0]
U1_inf = a0_inf * k1p[0]
U1_h = a0 * k1p[0] + i1p[0] * numpy.sum(b * B[:, 0])
U2_inf = b0_inf * k2p[0]
U2_h = b0 * k2p[0] + i2p[0] * numpy.sum(a * B[:, 0])
C1 = 2 * pi * kappa * phi01 * r1 * r1 * epsilon
C2 = 2 * pi * kappa * phi02 * r2 * r2 * epsilon
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
E_inter = C0 * (C1 * (U1_h - U1_inf) + C2 * (U2_h - U2_inf))
return E_inter
def constant_charge_twosphere_dissimilar(sigma01, sigma02, r1, r2, R, kappa,
epsilon):
"""
It computes the interaction energy between two dissimilar spheres at
constant charge, immersed in water.
Arguments
----------
sigma01: float, constant charge on the surface of the sphere 1.
sigma02: float, constant charge on the surface of the sphere 2.
r1 : float, radius of sphere 1.
r2 : float, radius of sphere 2.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
E_inter: float, interaction energy.
"""
N = 20 # Number of terms in expansion
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * r1)
K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
K2 = special.kv(index2, kappa * r2)
K2p = index / (kappa * r2) * K2[0:-1] - K2[1:]
k2 = special.kv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
k2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.kv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * K2p
I1 = special.iv(index2, kappa * r1)
I1p = index / (kappa * r1) * I1[0:-1] + I1[1:]
i1 = special.iv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.iv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * I1p
I2 = special.iv(index2, kappa * r2)
I2p = index / (kappa * r2) * I2[0:-1] + I2[1:]
i2 = special.iv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
i2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.iv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * I2p
B = numpy.zeros((N, N), dtype=float)
for n in range(N):
for m in range(N):
for nu in range(N):
if n >= nu and m >= nu:
g1 = gamma(n - nu + 0.5)
g2 = gamma(m - nu + 0.5)
g3 = gamma(nu + 0.5)
g4 = gamma(m + n - nu + 1.5)
f1 = factorial(n + m - nu)
f2 = factorial(n - nu)
f3 = factorial(m - nu)
f4 = factorial(nu)
Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
pi * g4 * f2 * f3 * f4)
kB = special.kv(n + m - 2 * nu + 0.5, kappa *
R) * numpy.sqrt(pi / (2 * kappa * R))
B[n, m] += Anm * kB
M = numpy.zeros((2 * N, 2 * N), float)
for j in range(N):
for n in range(N):
M[j, n + N] = (2 * j + 1) * B[j, n] * r1 * i1p[j] / (r2 * k2p[n])
M[j + N, n] = (2 * j + 1) * B[j, n] * r2 * i2p[j] / (r1 * k1p[n])
if n == j:
M[j, n] = 1
M[j + N, n + N] = 1
RHS = numpy.zeros(2 * N)
RHS[0] = sigma01 * r1 / epsilon
RHS[N] = sigma02 * r2 / epsilon
coeff = linalg.solve(M, RHS)
a = coeff[0:N] / (-r1 * kappa * k1p)
b = coeff[N:2 * N] / (-r2 * kappa * k2p)
a0 = a[0]
a0_inf = -sigma01 / (epsilon * kappa * k1p[0])
b0 = b[0]
b0_inf = -sigma02 / (epsilon * kappa * k2p[0])
U1_inf = a0_inf * k1[0]
U1_h = a0 * k1[0] + i1[0] * numpy.sum(b * B[:, 0])
U2_inf = b0_inf * k2[0]
U2_h = b0 * k2[0] + i2[0] * numpy.sum(a * B[:, 0])
C1 = 2 * pi * sigma01 * r1 * r1
C2 = 2 * pi * sigma02 * r2 * r2
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
E_inter = C0 * (C1 * (U1_h - U1_inf) + C2 * (U2_h - U2_inf))
return E_inter
def molecule_constant_potential(q, phi02, r1, r2, R, kappa, E_1, E_2):
"""
It computes the interaction energy between a molecule (sphere with
point-charge in the center) and a sphere at constant potential, immersed
in water.
Arguments
----------
    q      : float, number of qe to be assigned to the charge.
phi02 : float, constant potential on the surface of the sphere 2.
r1 : float, radius of sphere 1, i.e the molecule.
r2 : float, radius of sphere 2.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
E_1 : float, dielectric constant inside the sphere/molecule.
E_2 : float, dielectric constant outside the sphere/molecule.
Returns
--------
E_inter: float, interaction energy.
"""
N = 20 # Number of terms in expansion
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * r1)
K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
K2 = special.kv(index2, kappa * r2)
K2p = index / (kappa * r2) * K2[0:-1] - K2[1:]
k2 = special.kv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
k2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.kv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * K2p
I1 = special.iv(index2, kappa * r1)
I1p = index / (kappa * r1) * I1[0:-1] + I1[1:]
i1 = special.iv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.iv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * I1p
I2 = special.iv(index2, kappa * r2)
I2p = index / (kappa * r2) * I2[0:-1] + I2[1:]
i2 = special.iv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
i2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.iv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * I2p
B = numpy.zeros((N, N), dtype=float)
for n in range(N):
for m in range(N):
for nu in range(N):
if n >= nu and m >= nu:
g1 = gamma(n - nu + 0.5)
g2 = gamma(m - nu + 0.5)
g3 = gamma(nu + 0.5)
g4 = gamma(m + n - nu + 1.5)
f1 = factorial(n + m - nu)
f2 = factorial(n - nu)
f3 = factorial(m - nu)
f4 = factorial(nu)
Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
pi * g4 * f2 * f3 * f4)
kB = special.kv(n + m - 2 * nu + 0.5, kappa *
R) * numpy.sqrt(pi / (2 * kappa * R))
B[n, m] += Anm * kB
E_hat = E_1 / E_2
M = numpy.zeros((2 * N, 2 * N), float)
for j in range(N):
for n in range(N):
M[j, n + N] = (2 * j + 1) * B[j, n] * (
kappa * i1p[j] / k2[n] - E_hat * j / r1 * i1[j] / k2[n])
M[j + N, n] = (2 * j + 1) * B[j, n] * i2[j] * 1 / (
kappa * k1p[n] - E_hat * n / r1 * k1[n])
if n == j:
M[j, n] = 1
M[j + N, n + N] = 1
RHS = numpy.zeros(2 * N)
RHS[0] = -E_hat * q / (4 * pi * E_1 * r1 * r1)
RHS[N] = phi02
coeff = linalg.solve(M, RHS)
a = coeff[0:N] / (kappa * k1p - E_hat * numpy.arange(N) / r1 * k1)
b = coeff[N:2 * N] / k2
a0 = a[0]
a0_inf = -E_hat * q / (4 * pi * E_1 * r1 * r1) * 1 / (kappa * k1p[0])
b0 = b[0]
b0_inf = phi02 / k2[0]
phi_inf = a0_inf * k1[0] - q / (4 * pi * E_1 * r1)
phi_h = a0 * k1[0] + i1[0] * numpy.sum(b * B[:, 0]) - q / (4 * pi * E_1 *
r1)
phi_inter = phi_h - phi_inf
U_inf = b0_inf * k2p[0]
U_h = b0 * k2p[0] + i2p[0] * numpy.sum(a * B[:, 0])
U_inter = U_h - U_inf
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
C1 = q * 0.5
C2 = 2 * pi * kappa * phi02 * r2 * r2 * E_2
E_inter = C0 * (C1 * phi_inter + C2 * U_inter)
return E_inter
def molecule_constant_charge(q, sigma02, r1, r2, R, kappa, E_1, E_2):
"""
It computes the interaction energy between a molecule (sphere with
point-charge in the center) and a sphere at constant charge, immersed
in water.
Arguments
----------
    q      : float, number of qe to be assigned to the charge.
sigma02: float, constant charge on the surface of the sphere 2.
r1 : float, radius of sphere 1, i.e the molecule.
r2 : float, radius of sphere 2.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
E_1 : float, dielectric constant inside the sphere/molecule.
E_2 : float, dielectric constant outside the sphere/molecule.
Returns
--------
E_inter: float, interaction energy.
"""
N = 20 # Number of terms in expansion
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * r1)
K1p = index / (kappa * r1) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.kv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * K1p
K2 = special.kv(index2, kappa * r2)
K2p = index / (kappa * r2) * K2[0:-1] - K2[1:]
k2 = special.kv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
k2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.kv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * K2p
I1 = special.iv(index2, kappa * r1)
I1p = index / (kappa * r1) * I1[0:-1] + I1[1:]
i1 = special.iv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r1)**(3 / 2.)) * special.iv(
index, kappa * r1) + numpy.sqrt(pi / (2 * kappa * r1)) * I1p
I2 = special.iv(index2, kappa * r2)
I2p = index / (kappa * r2) * I2[0:-1] + I2[1:]
i2 = special.iv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
i2p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * r2)**(3 / 2.)) * special.iv(
index, kappa * r2) + numpy.sqrt(pi / (2 * kappa * r2)) * I2p
B = numpy.zeros((N, N), dtype=float)
for n in range(N):
for m in range(N):
for nu in range(N):
if n >= nu and m >= nu:
g1 = gamma(n - nu + 0.5)
g2 = gamma(m - nu + 0.5)
g3 = gamma(nu + 0.5)
g4 = gamma(m + n - nu + 1.5)
f1 = factorial(n + m - nu)
f2 = factorial(n - nu)
f3 = factorial(m - nu)
f4 = factorial(nu)
Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
pi * g4 * f2 * f3 * f4)
kB = special.kv(n + m - 2 * nu + 0.5, kappa *
R) * numpy.sqrt(pi / (2 * kappa * R))
B[n, m] += Anm * kB
E_hat = E_1 / E_2
M = numpy.zeros((2 * N, 2 * N), float)
for j in range(N):
for n in range(N):
M[j, n + N] = (2 * j + 1) * B[j, n] * (
i1p[j] / k2p[n] - E_hat * j / r1 * i1[j] / (kappa * k2p[n]))
M[j + N, n] = (2 * j + 1) * B[j, n] * i2p[j] * kappa * 1 / (
kappa * k1p[n] - E_hat * n / r1 * k1[n])
if n == j:
M[j, n] = 1
M[j + N, n + N] = 1
RHS = numpy.zeros(2 * N)
RHS[0] = -E_hat * q / (4 * pi * E_1 * r1 * r1)
RHS[N] = -sigma02 / E_2
coeff = linalg.solve(M, RHS)
a = coeff[0:N] / (kappa * k1p - E_hat * numpy.arange(N) / r1 * k1)
b = coeff[N:2 * N] / (kappa * k2p)
a0 = a[0]
a0_inf = -E_hat * q / (4 * pi * E_1 * r1 * r1) * 1 / (kappa * k1p[0])
b0 = b[0]
b0_inf = -sigma02 / (E_2 * kappa * k2p[0])
phi_inf = a0_inf * k1[0] - q / (4 * pi * E_1 * r1)
phi_h = a0 * k1[0] + i1[0] * numpy.sum(b * B[:, 0]) - q / (4 * pi * E_1 *
r1)
phi_inter = phi_h - phi_inf
U_inf = b0_inf * k2[0]
U_h = b0 * k2[0] + i2[0] * numpy.sum(a * B[:, 0])
U_inter = U_h - U_inf
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
C1 = q * 0.5
C2 = 2 * pi * sigma02 * r2 * r2
E_inter = C0 * (C1 * phi_inter + C2 * U_inter)
return E_inter
def constant_potential_twosphere_identical(phi01, phi02, r1, r2, R, kappa,
epsilon):
"""
    It computes the interaction energy for two spheres at constant surface
    potential, according to Carnie & Chan (1993).
Arguments
----------
phi01 : float, constant potential on the surface of the sphere 1.
phi02 : float, constant potential on the surface of the sphere 2.
r1 : float, radius of sphere 1.
r2 : float, radius of sphere 2.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Note:
Even though it admits phi01 and phi02, they should be identical; and
the same is applicable to r1 and r2.
Returns
--------
E_inter: float, interaction energy.
"""
# From Carnie+Chan 1993
N = 20 # Number of terms in expansion
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index = numpy.arange(N, dtype=float) + 0.5
k1 = special.kv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
k2 = special.kv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
i1 = special.iv(index, kappa * r1) * numpy.sqrt(pi / (2 * kappa * r1))
i2 = special.iv(index, kappa * r2) * numpy.sqrt(pi / (2 * kappa * r2))
B = numpy.zeros((N, N), dtype=float)
for n in range(N):
for m in range(N):
for nu in range(N):
if n >= nu and m >= nu:
g1 = gamma(n - nu + 0.5)
g2 = gamma(m - nu + 0.5)
g3 = gamma(nu + 0.5)
g4 = gamma(m + n - nu + 1.5)
f1 = factorial(n + m - nu)
f2 = factorial(n - nu)
f3 = factorial(m - nu)
f4 = factorial(nu)
Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
pi * g4 * f2 * f3 * f4)
kB = special.kv(n + m - 2 * nu + 0.5, kappa *
R) * numpy.sqrt(pi / (2 * kappa * R))
B[n, m] += Anm * kB
M = numpy.zeros((N, N), float)
for i in range(N):
for j in range(N):
M[i, j] = (2 * i + 1) * B[i, j] * i1[i]
if i == j:
M[i, j] += k1[i]
RHS = numpy.zeros(N)
RHS[0] = phi01
a = linalg.solve(M, RHS)
a0 = a[0]
U = 4 * pi * (-pi / 2 * a0 / phi01 * 1 / numpy.sinh(kappa * r1) + kappa *
r1 + kappa * r1 / numpy.tanh(kappa * r1))
C0 = qe**2 * Na * 1e-3 * 1e10 / (cal2J * E_0)
C1 = r1 * epsilon * phi01 * phi01
E_inter = U * C1 * C0
return E_inter
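# Cross-check (illustrative): for equal radii and equal surface potentials
# this Carnie & Chan expression can be compared against the general
# dissimilar-sphere routine above with identical arguments:
# E_id = constant_potential_twosphere_identical(1., 1., 4., 4., 12., 0.125, 80.)
# E_di = constant_potential_twosphere_dissimilar(1., 1., 4., 4., 12., 0.125, 80.)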
def constant_charge_twosphere_identical(sigma, a, R, kappa, epsilon):
"""
    It computes the interaction energy for two spheres at constant surface
    charge, according to Carnie & Chan (1993).
Arguments
----------
sigma : float, constant charge on the surface of the spheres.
a : float, radius of spheres.
R : float, distance center to center.
kappa : float, reciprocal of Debye length.
epsilon: float, water dielectric constant.
Returns
--------
E_inter: float, interaction energy.
"""
# From Carnie+Chan 1993
N = 10 # Number of terms in expansion
    E_p = 0  # Permittivity inside sphere
qe = 1.60217646e-19
Na = 6.0221415e23
E_0 = 8.854187818e-12
cal2J = 4.184
index2 = numpy.arange(N + 1, dtype=float) + 0.5
index = index2[0:-1]
K1 = special.kv(index2, kappa * a)
K1p = index / (kappa * a) * K1[0:-1] - K1[1:]
k1 = special.kv(index, kappa * a) * numpy.sqrt(pi / (2 * kappa * a))
k1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * a)**(3 / 2.)) * special.kv(
index, kappa * a) + numpy.sqrt(pi / (2 * kappa * a)) * K1p
I1 = special.iv(index2, kappa * a)
I1p = index / (kappa * a) * I1[0:-1] + I1[1:]
i1 = special.iv(index, kappa * a) * numpy.sqrt(pi / (2 * kappa * a))
i1p = -numpy.sqrt(pi / 2) * 1 / (2 * (kappa * a)**(3 / 2.)) * special.iv(
index, kappa * a) + numpy.sqrt(pi / (2 * kappa * a)) * I1p
B = numpy.zeros((N, N), dtype=float)
for n in range(N):
for m in range(N):
for nu in range(N):
if n >= nu and m >= nu:
g1 = gamma(n - nu + 0.5)
g2 = gamma(m - nu + 0.5)
g3 = gamma(nu + 0.5)
g4 = gamma(m + n - nu + 1.5)
f1 = factorial(n + m - nu)
f2 = factorial(n - nu)
f3 = factorial(m - nu)
f4 = factorial(nu)
Anm = g1 * g2 * g3 * f1 * (n + m - 2 * nu + 0.5) / (
pi * g4 * f2 * f3 * f4)
kB = special.kv(n + m - 2 * nu + 0.5, kappa *
                                    R) * numpy.sqrt(pi / (2 * kappa * R))
                    B[n, m] += Anm * kB
"""Tests for processors utilities."""
import numpy as np
import rimseval.processor_utils as pu
import rimseval.data_io.crd_utils as crdu
def test_create_packages():
"""Create packages from data."""
ions_per_shot = np.array([0, 0, 1, 0, 2, 0, 3, 2, 1, 4])
all_tofs = np.array([20, 25, 70, 53, 68, 11, 54, 12, 68, 99, 65, 48, 7])
bin_start = all_tofs.min()
bin_end = all_tofs.max()
len_data = bin_end - bin_start + 1
tofs_mapper = crdu.shot_to_tof_mapper(ions_per_shot)
assert ions_per_shot.sum() == len(all_tofs)
assert all_tofs.max() < len_data + bin_start # sanity checks for setup
# packages expected
pkg_length = 4
nof_pkg = len(ions_per_shot) // pkg_length
if len(ions_per_shot) % pkg_length > 0:
nof_pkg += 1
pkg_nof_shots_exp = np.zeros(nof_pkg) + pkg_length
if (tmp := len(ions_per_shot) % pkg_length) > 0:
pkg_nof_shots_exp[-1] = tmp
pkg_data_exp = np.zeros((nof_pkg, len_data))
for it, shot in enumerate(ions_per_shot):
pkg_it = it // pkg_length
mapper = tofs_mapper[it]
tofs = all_tofs[mapper[0] : mapper[1]]
for tof in tofs:
pkg_data_exp[pkg_it][int(tof) - bin_start] += 1
pkg_data_rec, pkg_nof_shots_rec = pu.create_packages(
pkg_length, tofs_mapper, all_tofs
)
np.testing.assert_equal(pkg_nof_shots_rec, pkg_nof_shots_exp)
np.testing.assert_equal(pkg_data_rec, pkg_data_exp)
def test_integrals_bg_corr():
"""Background correction for defined integrals."""
integrals = np.array([[10, np.sqrt(10)], [40, np.sqrt(40)]])
int_names = np.array(["28Si", "29Si"])
int_ch = np.array([30, 40])
bgs = np.array([[1, np.sqrt(1)], [2, np.sqrt(2)], [3, np.sqrt(3)]])
bgs_names = np.array(["28Si", "28Si", "29Si"])
bgs_ch = np.array([20, 10, 50])
# manual calculations for expected values
bgs_cnt = bgs[:, 0]
bgs_norm = np.array(
[(bgs_cnt[0] / bgs_ch[0] + bgs_cnt[1] / bgs_ch[1]) / 2, bgs_cnt[2] / bgs_ch[2]]
)
bgs_norm_unc = np.array(
[
(np.sqrt(bgs_cnt[0]) / bgs_ch[0] + np.sqrt(bgs_cnt[1]) / bgs_ch[1]) / 2,
np.sqrt(bgs_cnt[2]) / bgs_ch[2],
]
)
integrals_corr = integrals[:, 0] - int_ch * bgs_norm
integrals_corr_unc = np.sqrt(integrals[:, 0] + bgs_norm_unc ** 2)
integrals_exp = np.array(
[
[integrals_corr[it], integrals_corr_unc[it]]
for it in range(len(integrals_corr))
]
)
integrals_rec, integrals_pkg_rec = pu.integrals_bg_corr(
integrals, int_names, int_ch, bgs, bgs_names, bgs_ch
)
np.testing.assert_equal(integrals_rec, integrals_exp)
assert integrals_pkg_rec is None
def test_integrals_bg_corr_pkg():
"""Background correction for defined integrals with packages."""
integrals = np.array([[10, np.sqrt(10)], [40, np.sqrt(40)]])
integrals_pkg = np.array([integrals])
    int_names = np.array(["28Si", "29Si"])
from unittest import TestCase
import numpy as np
from sklearn.metrics import precision_score, auc, roc_curve, cohen_kappa_score
from source.analysis.performance.performance_builder import PerformanceBuilder
from source.analysis.performance.raw_performance import RawPerformance
from source.analysis.performance.epoch_performance import SleepWakePerformance
from test.test_helper import TestHelper
class TestPerformanceBuilder(TestCase):
def test_build_from_raw(self):
threshold = 0.2
raw_performance = RawPerformance(true_labels=np.array([1, 0]),
class_probabilities=np.array([[0.1, 0.9], [0.3, 0.7]]))
predicted_labels = np.array([1, 1])
kappa = cohen_kappa_score(raw_performance.true_labels, predicted_labels)
sleep_predictive_value = precision_score(raw_performance.true_labels, predicted_labels, pos_label=1)
wake_predictive_value = precision_score(raw_performance.true_labels, predicted_labels, pos_label=0)
false_positive_rates, true_positive_rates, thresholds = roc_curve(raw_performance.true_labels,
raw_performance.class_probabilities[:, 1],
pos_label=1,
drop_intermediate=False)
auc_value = auc(false_positive_rates, true_positive_rates)
expected_performance = SleepWakePerformance(accuracy=np.float64(0.5)
__author__ = 'stephen'
import numpy as np
import scipy.io
import scipy.sparse
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.pylab as pylab
from .utils import get_subindices
import matplotlib.ticker as mtick
from collections import Counter
from sklearn.neighbors import KernelDensity
from scipy import stats
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1, potential=False):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
clusters = np.unique(labels)
plt.rc("font", size=10)
if step > 1:
clusters = clusters[0:len(clusters):step]
colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
if potential is False: #plot Alanine Dipeptide
for i in clusters:
if i != outliers:
point = np.where(labels == i)
plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7)#, color=colors_jet[i])
#else:
# point = np.where(labels == i)
# plt.plot(phi_angles[point], psi_angles[point], '.', markersize=1.0, alpha=0.7, color='black') # , color=colors_jet[i])
plt.title("Alanine Dipeptide " + name + " states", fontsize=10)
# plt.xlim([-180, 180])
# plt.ylim([-180, 180])
# plt.xticks([-110, -60, 0, 60, 120])
# plt.yticks([-120, -60, 0, 60, 120])
else: # if plot 2D potential
plt.figure(figsize=(10, 10))
for i in clusters:
if i != outliers:
plt.plot(phi_angles[np.where(labels == i)],
psi_angles[np.where(labels == i)], '.', markersize=1.0, alpha=0.7) #markersize=20.0, color=colors_jet[i])
#plt.plot(phi_angles[np.where(labels == i)],
# psi_angles[np.where(labels == i)],
# '.', color=colors_jet[i], label='State %d' % i)
#plt.title("2D potential " + name + " states", fontsize=20)
plt.xlim([-75, 75])
plt.ylim([-75, 75])
plt.xticks([-50, 0, 50])
plt.yticks([-50, 0, 50])
plt.xlabel(r"$\phi$", fontsize=25)
plt.ylabel(r"$\psi$", fontsize=25)
# Save the result figure
plt.savefig('./'+name+'.png', dpi=400)
plt.close()
#plt.show()
def plot_each_cluster(labels, phi_angles, psi_angles, name, outliers=-1, step=1):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
clusters = np.unique(labels)
if step > 1:
clusters = clusters[0:len(clusters):step]
colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(clusters)+1))
for i in np.unique(clusters):
if i != outliers:
plt.plot(phi_angles[np.where(labels == i)],
psi_angles[np.where(labels == i)],
'x', color=colors_jet[i], label='State %d' % i)
#plt.title("Alanine Dipeptide " + name + " state_" + str(i))
plt.xlabel(r"$\phi$")
plt.ylabel(r"$\psi$")
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
# Save the result figure
plt.savefig('./'+ name + " state_" + str(i)+'.png', dpi = 400)
plt.close()
#plt.show()
def contour_cluster(labels, phi_angles, psi_angles, name, outliers=-1):
'''
:param labels: the assignments after clustering or lumping
:param phi_angles: the phi angles
:param psi_angles: the psi angles
:param name: the name of the result pictures
:param outliers: outliers default is -1
:return: None
'''
# lables_array = np.array(labels)
# colors_jet = plt.cm.jet(np.linspace(0, 1, np.max(lables_array)+1))
for i in np.unique(labels):
#if i != outliers:
if i == 1:
print("i=", i)
x = phi_angles[np.where(labels == i)]
y = psi_angles[np.where(labels == i)]
indices = get_subindices(assignments=x, state=None, samples=1000)
x = x[indices]
y = y[indices]
X, Y= np.meshgrid(x, y)
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
#kde = KernelDensity(kernel='gaussian', bandwidth=0.2)
#kde_results = kde.score_samples([x,y])
#X, Y, Z = np.meshgrid(x, y, kde_results)
#Z = np.reshape(kernel([x,y]).T, x.shape)
#Z1 = mlab.bivariate_normal(X, Y, 5.0, 5.0, 0.0, 0.0)
#Z2 = mlab.bivariate_normal(X, Y, 7.5, 2.5, 5, 5)
# difference of Gaussians
#Z = 10.0 * (Z2 - Z1)
#step = Z.max()-Z.min()/10
#print "Z min:",Z.min(), "Z.max:", Z.max(), "step:", step
#levels = np.arange(Z.min(), Z.min(), Z.max())
#print levels
plt.contour(X, Y, Z, origin='lower') #, linewidths=Z.min(), levels=levels)
plt.title("Alanine Dipeptide " + name + " states")
plt.xlabel(r"$\phi$")
plt.ylabel(r"$\psi$")
plt.xlim([-180, 180])
plt.ylim([-180, 180])
# Save the result figure
plt.savefig('./'+name+'.png', dpi=400)
plt.close()
#plt.show()
def plot_matrix(tProb_=None, name=None):
'''
if labels is not None:
n_states = len(set(labels)) - (1 if -1 in labels else 0)
print 'n_states=', n_states
#diagC = tProb_.diagonal()
length = len(labels)
print "length=", length
Cmn = scipy.sparse.lil_matrix(n_states, n_states, dtype=np.float32)
Cmn = np.zeros((n_states, n_states))
print "size of tProb", tProb_.shape
if scipy.sparse.issparse(tProb_):
tProb_ = tProb_.todense()
for i in xrange(length):
for j in xrange(length):
Cmn[labels[i], labels[j]] += tProb_[i, j]
#for i in xrange(n_states):
#Cmn[i,i] += diagC[i]
# for j in xrange(n_states):
# Cmn[i, j] += Cmn[j, i]
# Cmn[j, i] = Cmn[i, j]
for j in xrange(n_states):
sum_row = np.sum(Cmn[j,:])
if sum_row is not 0:
Cmn[j,:] /= sum_row
pylab.matshow(Cmn, cmap=plt.cm.OrRd)
else:
'''
pylab.matshow(tProb_, cmap=plt.cm.OrRd)
plt.colorbar()
#pylab.show()
plt.savefig('./' + name + 'Matrix.png', dpi=400)
plt.close()
def plot_block_matrix(labels, tProb_, name='BlockMatrix'):
print("Plot Block Matrix")
indices = np.argsort(labels)
#print indices
block_matrix = tProb_[:,indices]
block_matrix = block_matrix[indices,:]
block_matrix = 1 - block_matrix
#print block_matrix
pylab.matshow(block_matrix, cmap=plt.cm.OrRd)
plt.colorbar()
plt.savefig('./' + name + '.png', dpi=400)
#pylab.show()
plt.close()
def plot_cluster_size_distribution(populations, name='Populations'):
fig = plt.figure(1, (10,6))
distrib = fig.add_subplot(1,1,1)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
plt.rc("font", size=30)
plt.title('Cluster size distributions', fontsize=20)
distrib.grid(True)
X = range(len(populations))
X_xtick = ['']
for i in range(1, len(populations)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
plt.xticks(np.arange(len(populations)+1), X_xtick)
plt.ylabel(r"Probability")
plt.ylim([0,100])
print("X:", X)
distrib.bar(X, populations*100, facecolor='black', edgecolor='white', width=1.0) #facecolor='#f78181',
plt.savefig('./' + name + '_Distribution.png', dpi=400)
plt.close()
#plt.show()
def plot_compare_cluster_size_distribution(populations_1, populations_2, name='Populations'):
fig = plt.figure(1, (10,8))
distrib = fig.add_subplot(1,1,1)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
bar_width = 0.45
plt.rc("font", size=20)
#plt.title('Cluster size distributions', fontsize=20)
distrib.grid(True)
X = np.arange(len(populations_1))
X_xtick = ['']
for i in range(1, len(populations_1)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
print("X:", X)
distrib.bar(X, populations_1*100, facecolor='black', edgecolor='white', width=bar_width,label="kNN Density Peaks 3645 states") #facecolor='#f78181',
# populations_2
#X = range(len(populations_2))
X_xtick = ['']
for i in range(1, len(populations_2)+1):
xx = '$10^' + str(i) + '$'
X_xtick.append(xx)
print(X_xtick)
#plt.xticks(X , ('$10^0$', '$10^1$', '$10^2$', '$10^3$', '$10^4$'))
print("X:", X)
distrib.bar(X+bar_width, populations_2*100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states") #facecolor='#f78181',
plt.xticks(np.arange(len(populations_1)+1+bar_width), X_xtick)
#plt.ylabel(r"Fraction number of clusters")
plt.ylabel(r"Probability")
plt.ylim([0,60])
plt.legend()
plt.savefig('./' + name + '_Distribution.png', dpi=400)
plt.close()
#plt.show()
#From Wang Wei's code
def plot_landscape(labels=None, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=80, potential=False):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
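# convert the 2D histogram into a free-energy surface in units of k_B T:
# F = -ln(p / p_max); empty bins are capped at 9 k_B T to avoid log(0)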
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
plt.figure(figsize=(12, 12))
plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot cluster centers on landscape
if labels is not None:
plt.plot(phi_ctr, psi_ctr, '.', markersize=10, color='r')
distribution = np.zeros(10, dtype=np.float64)
#print "len phi_ctr", len(phi_ctr)
#print "shape of xedges", xedges.shape
for i in range(0, len(phi_angles)):
if psi_angles[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_angles[i])[0][0] - 1
if phi_angles[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_angles[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution[index_distrib] += 1
distribution /= len(phi_angles)
print(distribution)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel(r'$\phi$', fontsize=20)
plt.ylabel(r'$\Psi$', fontsize=20)
cbar = plt.colorbar(shrink=0.77)
#plt.title('Free energy landscape', fontsize=20)
cbar.set_label("$k_B T$", size=20)
cbar.ax.tick_params(labelsize=20)
if potential is False:
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
else:
plt.xlim([-75, 75])
plt.ylim([-75, 75])
plt.xticks([-50, 0, 50])
plt.yticks([-50, 0, 50])
plt.savefig('./' + name + '.png', dpi=400)
#plt.show()
plt.close()
#Cluster Centers on Free energy landscape distribution
fig = plt.figure(1, (10,6))
plt.rc("font", size=15)
distrib = fig.add_subplot(1,1,1)
distrib.grid(True)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
plt.title('Cluster Centers on Free energy landscape distribution', fontsize=20)
plt.xlabel("$k_B T$")
plt.ylabel(r"Probability")
plt.ylim([0, 100])
plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
distrib.bar(np.arange(10), distribution*100, facecolor='black', edgecolor='white', width=1.0) #facecolor='#f78181'
plt.savefig('./' + name + '_Distribution.png', dpi=400)
#plt.show()
plt.close()
def plot_compare_distribution(labels_1=None, labels_2=None, phi_angles=None, psi_angles=None, phi_ctr_1=None, psi_ctr_1=None, phi_ctr_2=None, psi_ctr_2=None, name='Energy_Landscape', bins=36, potential=False):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
#extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
#plt.figure(figsize=(10, 10))
#plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot cluster centers on landscape
#if labels_1 is not None:
# plt.plot(phi_ctr_1, psi_ctr_1, '*', markersize=8, color='r')
distribution_1 = np.zeros(10, dtype=np.float64)
for i in range(0, len(phi_ctr_1)):
if psi_ctr_1[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr_1[i])[0][0] - 1
if phi_ctr_1[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr_1[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution_1[index_distrib] += 1
distribution_1 /= len(phi_ctr_1)
print(distribution_1)
distribution_2 = np.zeros(10, dtype=np.float64)
for i in range(0, len(phi_ctr_2)):
if psi_ctr_2[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr_2[i])[0][0] - 1
if phi_ctr_2[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr_2[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution_2[index_distrib] += 1
distribution_2 /= len(phi_ctr_2)
print(distribution_2)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel(r'$\phi$', fontsize=20)
plt.ylabel(r'$\Psi$', fontsize=20)
#cbar = plt.colorbar(shrink=0.77)
##plt.title('Free energy landscape', fontsize=20)
#cbar.set_label("$k_B T$", size=20)
#cbar.ax.tick_params(labelsize=20)
#if potential is False:
# plt.xlim([-180, 180])
# plt.ylim([-180, 180])
# plt.xticks([-120, -60, 0, 60, 120])
# plt.yticks([-120, -60, 0, 60, 120])
#else:
# plt.xlim([-75, 75])
# plt.ylim([-75, 75])
# plt.xticks([-50, 0, 50])
# plt.yticks([-50, 0, 50])
#plt.savefig('./' + name + '.png', dpi=400)
##plt.show()
#plt.close()
#Cluster Centers on Free energy landscape distribution
fig=plt.figure(1, (10,6))
plt.rc("font", size=15)
distrib = fig.add_subplot(1,1,1)
distrib.grid(True)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
distrib.yaxis.set_major_formatter(xticks)
# plt.xticks(np.arange(11), ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
n_groups = 10
index = np.arange(n_groups)
bar_width = 0.45
distrib.bar(index, distribution_1*100, facecolor='black', edgecolor='white', width=bar_width, label="kNN Density Peaks 3645 states") #facecolor='#f78181'
distrib.bar(index+bar_width, distribution_2*100, facecolor='gray', edgecolor='white', width=bar_width, label="kNN Density Peaks 117 states")
#plt.title('Cluster Centers on Free energy landscape distribution', fontsize=10)
plt.xlabel("$k_B T$")
plt.ylabel(r"Fraction number of clusters")
plt.ylim([0, 50])
plt.xticks(index+bar_width, ('', '1', '', '3', '', '5', '', '7', '', '9', ''))
plt.legend()
#plt.tight_layout()
plt.savefig('./' + name + '_Distribution.png', dpi=400)
#plt.show()
plt.close()
def plot_landscape_barrier(labels=None, selected=1, phi_angles=None, psi_angles=None, phi_ctr=None, psi_ctr=None, name='Energy_Landscape', bins=36, potential=False, outliers=-1):
H, xedges, yedges = np.histogram2d(psi_angles, phi_angles, bins=bins)
#since we calculate total number in 10 interval, thus bin of every dimension must be 36
#If element in H is zero, set the final energy to be 9
plt.rc("font", size=25)
maxH = np.max(H)
for i in range(len(H)):
for j in range(len(H)):
if H[i][j]==0:
H[i][j]=9
else:
H[i][j] = -np.log(H[i][j]/maxH)
#H = -np.log(H/np.max(H))
extent =[np.min(xedges), np.max(xedges), np.min(yedges), np.max(yedges)]
plt.figure(figsize=(12, 12))
plt.imshow(H, extent=extent, origin="lower", cmap=plt.cm.gray) #plt.cm.jet
#plot points
colors = ['y', 'b', 'tomato', 'm', 'g', 'c', 'yellowgreen']
color_index = 0
clusters = np.unique(labels)
for i in clusters:
if i != outliers:
if i in selected:
point = np.where(labels == i)
plt.plot(phi_angles[point], psi_angles[point], '2', alpha=0.20, color=colors[color_index])#, color=colors_jet[i])
color_index += 1
#plot cluster centers on landscape
if labels is not None:
plt.plot(phi_ctr, psi_ctr, '*', markersize=10, color='r')
distribution = np.zeros(10, dtype=np.float64)
#print "len phi_ctr", len(phi_ctr)
#print "shape of xedges", xedges.shape
for i in range(0, len(phi_ctr)):
if psi_ctr[i] > 179.0:
index_x = np.where(xedges > 179.0)[0][0] - 1
else:
index_x = np.where(xedges > psi_ctr[i])[0][0] - 1
if phi_ctr[i] > 179.0:
index_y = np.where(yedges > 179.0)[0][0] - 1
else:
index_y = np.where(yedges > phi_ctr[i])[0][0] - 1
index_distrib = int(H[index_x][index_y])
distribution[index_distrib] += 1
distribution /= len(phi_ctr)
print(distribution)
# print "clenter:", i, "[", phi_ctr,",", psi_ctr,"]", "H=", H[index_x][index_y]
plt.xlabel(r'$\phi$', fontsize=20)
plt.ylabel(r'$\Psi$', fontsize=20)
cbar = plt.colorbar(shrink=0.77)
#plt.title('Free energy landscape', fontsize=20)
cbar.set_label("$k_B T$", size=20)
cbar.ax.tick_params(labelsize=20)
plt.xlim([-180, 180])
plt.ylim([-180, 180])
plt.xticks([-120, -60, 0, 60, 120])
plt.yticks([-120, -60, 0, 60, 120])
plt.plot([-103,-103],[30,180],'w') #plot the barrier
plt.savefig('./' + name + '.png', dpi=400)
#plt.show()
plt.close()
def calculate_population(labels, name='Populations'):
print("Calculating and plotting population...")
counts = list(Counter(labels).values())
total_states = np.max(labels) + 1
#states_magnitude = int(np.ceil(np.log10(total_states)))
total_frames = len(labels)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
magnitude = int(np.ceil(log_i))  # integer index into the magnitude histogram
populations[magnitude] += 1
#print magnitude populations
print("Populations Probability:")
#bins = [0]
for i in range(len(populations)):
populations[i] = populations[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations[i]*100, "%")
#bins.append(10**(i+1))
name += '_Populations'
print("name:", name)
plot_cluster_size_distribution(populations=populations, name=name)
print("Done.")
def compare_population(labels_1, labels_2, name='Compare_Populations'):
print("Calculating and plotting population...")
counts = list(Counter(labels_1).values())
total_states = np.max(labels_1) + 1
total_frames = len(labels_1)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
print("states", total_states, "frames", total_frames)
populations_1 = np.zeros(frames_magnitude+1)
for i in counts:
if i > 0:
log_i = np.log10(i)
magnitude = int(np.ceil(log_i))  # integer index into the magnitude histogram
populations_1[magnitude] += 1
print("Populations Probability:")
for i in range(len(populations_1)):
populations_1[i] = populations_1[i] / total_states
print("10 ^", i, "to", "10 ^", i+1,":", populations_1[i]*100, "%")
counts = list(Counter(labels_2).values())
total_states = np.max(labels_2) + 1
total_frames = len(labels_2)
frames_magnitude = int(np.ceil(np.log10(total_frames)))
# coding: utf-8
# Copyright (c) MoGroup at UMD.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import numpy as np
from monty.json import MSONable
from scipy import stats
from scipy.optimize import curve_fit
from pymatgen.io.vasp.outputs import Vasprun
from pymatgen.util.coord import pbc_diff
from pymatgen.core.structure import Structure
from pymatgen.core.periodic_table import Specie
import csv
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__version__ = "0.1"
__date__ = "6/6/2017"
__credit__ = "Pymatgen Development Team"
class DiffusivityAnalyzer(MSONable):
def __init__(self, structure, displacements, specie, temperature,
time_step, step_skip, time_intervals_number=1000,
spec_dict=None):
"""
Calculate MSD from pre-processed data, and perform linear fitting to obtain the diffusivity.
:param structure (Structure): initial structure
:param displacements (np.array): numpy array, shape is [n_ions, n_steps, axis]
:param specie (str): species string, can be Li or Li+, make sure structure has oxidation
state accordingly.
:param temperature (float): temperature of MD
:param time_step (float): time step in MD
:param step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
:param time_intervals_number (int): number of time intervals. Default is 1000
means there are ~1000 time intervals.
:param spec_dict (dict): spec dict of linear fitting. Default is
{'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}
lower_bound is in unit of Angstrom square
upper_bound is in unit of total time. 0.5 means upper fitting bound is 0.5*t_total
minimum_msd_diff is in unit of Angstrom square. msd[upper_bound] - msd[lower_bound] should be larger
than minimum_msd_diff to do linear fitting.
"""
spec_dict = spec_dict if spec_dict is not None else {'lower_bound': 4.5, 'upper_bound': 0.5,
'minimum_msd_diff': 4.5}
if not {'lower_bound', 'upper_bound', 'minimum_msd_diff'} <= set(spec_dict.keys()):
raise Exception("spec_dict does not have enough parameters.")
time_step_displacements = time_step * step_skip
# prepare
indices = []
framework_indices = []
for i, site in enumerate(structure):
if site.specie.symbol == specie:
indices.append(i)
else:
framework_indices.append(i)
if len(indices) == 0:
raise Exception("There is no specie {} in the structure".format(specie))
if len(framework_indices) == 0:
dc = displacements
else:
framework_disp = displacements[framework_indices]
drift = np.average(framework_disp, axis=0)[None, :, :]
dc = displacements - drift
df = structure.lattice.get_fractional_coords(dc)
displacements_final_diffusion_ions = dc[indices]
displacements_frac_final_diffusion_ions = df[indices]
n_ions, n_steps, dim = displacements_final_diffusion_ions.shape
# time intervals, dt
dt_indices = np.arange(1, n_steps, max(int((n_steps - 1) / time_intervals_number), 1))
dt = dt_indices * time_step_displacements
# calculate msd
# define functions, algorithm from
# http://stackoverflow.com/questions/34222272/computing-mean-square-displacement-using-python-and-fft
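# The MSD at lag dt decomposes as MSD(dt) = S1(dt) - 2*S2(dt), where S1 is built from
# squared displacements and S2 is the position autocorrelation; evaluating S2 with an
# FFT (Wiener-Khinchin theorem) brings the cost down from O(N^2) to O(N log N).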
def autocorrelation_fft(x):
N = x.shape[0]
F = np.fft.fft(x, n=2 * N)
PSD = F * F.conjugate()
res = np.fft.ifft(PSD)
res = (res[:N]).real
n = N * np.ones(N) - np.arange(N)
return res / n
def one_ion_msd_fft(r, dt_indices):
"""
r (np.array, shape is typically [n_step, 3], n_step is number of steps, 3 is 3 dimensions)
"""
# ------------ S1
n_step, dim = r.shape
r_square = np.square(r)
r_square = np.append(r_square, np.zeros((1, dim)), axis=0) # (n_step+1, 3)
S1_component = np.zeros((dim, n_step)) # (dim, n_step)
r_square_sum = 2 * np.sum(r_square, axis=0) # (3)
for i in range(n_step):
r_square_sum = r_square_sum - r_square[i - 1, :] - r_square[n_step - i, :]
S1_component[:, i] = r_square_sum / (n_step - i)
S1 = np.sum(S1_component, axis=0)
# ------------ S2
S2_component = np.array([autocorrelation_fft(r[:, i]) for i in range(r.shape[1])]) # (dim, N)
S2 = np.sum(S2_component, axis=0)
# ------------ return
return (S1 - 2 * S2)[dt_indices], (S1_component - 2 * S2_component)[:, dt_indices]
n_dt = len(dt_indices)
msd_by_ions = np.empty([0, n_dt]) # shape of n_ions * n_dt
msd_component_by_ions = np.empty([3, 0, n_dt]) # shape of 3 * n_ions * n_dt
for i in range(n_ions):
msd_i, msd_component_i = one_ion_msd_fft(displacements_final_diffusion_ions[i, :, :], dt_indices)
msd_by_ions = np.append(msd_by_ions,
msd_i.reshape(1, n_dt),
axis=0)
msd_component_by_ions = np.append(msd_component_by_ions,
msd_component_i.reshape(3, 1, n_dt),
axis=1)
msd = np.average(msd_by_ions, axis=0)
msd_component = np.average(msd_component_by_ions, axis=1)
# further things, 1. determine lower_index, upper_index 2. linear fitting, 3. error bar
# one headache, how about error in different axis
lower_bound_index = len(msd[msd < spec_dict['lower_bound']])
upper_bound_index = int(len(msd) * spec_dict['upper_bound']) - 1
"""
if lower_bound_index >= upper_bound_index - 2:
raise Exception("Maximum MSD is {:.2f}. ".format(max(msd)) + \
"MSD array has shape of {}. ".format(msd.shape) + \
"Lower bound index is {}, upper bound index is {}. ".format(lower_bound_index,
upper_bound_index) + \
"There is no enough data to fit. " + \
"Please consider extending your MD simulation or increasing the temperature.")
if msd[upper_bound_index] - msd[lower_bound_index] < spec_dict['minimum_msd_diff']:
raise Exception(
"Maximum MSD is {:.2f}. ".format(max(msd)) + \
"MSD at lower bound is {:.2f}, MSD at upper bound is {:.2f}. The MSD fitting range is too small. " \
.format(msd[lower_bound_index], msd[upper_bound_index]) + \
"Please consider extending your MD simulation or increasing the temperature.")
"""
if lower_bound_index >= upper_bound_index - 2 or \
msd[upper_bound_index] - msd[lower_bound_index] < spec_dict['minimum_msd_diff']:
slope = -1
intercept = -1
slope_components = np.zeros(dim)
else:
slope, intercept, _, _, _ = stats.linregress(dt[lower_bound_index:upper_bound_index + 1],
msd[lower_bound_index:upper_bound_index + 1])
slope_components = np.zeros(dim)
for i in range(dim):
s, _, _, _, _ = stats.linregress(dt[lower_bound_index:upper_bound_index + 1],
msd_component[i, :][lower_bound_index:upper_bound_index + 1])
slope_components[i] = s
self.structure = structure
self.indices = indices
self.framework_indices = framework_indices
self.drift = drift
self.drift_maximum = np.max(np.abs(drift), axis=1)[0] # the maximum drift vector of the framework ions, shape is (3,)
self.disp = displacements
self.displacements_final_diffusion_ions = displacements_final_diffusion_ions
self.specie = specie
self.temperature = temperature
self.time_step = time_step
self.step_skip = step_skip
self.time_step_displacements = time_step_displacements
self.time_intervals_number = time_intervals_number
self.spec_dict = spec_dict
if len(framework_indices) == 0:
self.max_framework_displacement = 0.0
else:
self.max_ion_displacements = np.max(np.sum(
dc ** 2, axis=-1) ** 0.5, axis=1)
self.max_framework_displacement = \
np.max(self.max_ion_displacements[framework_indices])
self.dt = dt
self.lower_bound = spec_dict['lower_bound']
self.upper_bound = spec_dict['upper_bound']
self.lower_bound_index = lower_bound_index
self.upper_bound_index = upper_bound_index
self.msd = msd
self.msd_by_ions = msd_by_ions
self.msd_component = msd_component
self.diffusivity = slope / (20 * dim)
self.diffusivity_components = slope_components / 20
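# D = slope / (2 * dim) in A^2/fs; multiplying by 0.1 converts to cm^2/s
# (1 A^2/fs = 0.1 cm^2/s), hence the combined factor of 1 / (20 * dim).
# This assumes time_step is in fs and displacements are in Angstrom.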
def get_summary_dict(self, oxidized_specie=None):
"""
A summary of information
:param oxidized_specie (str): specie string with oxidation state. If provided or specie in initial
function is oxidized, it will calculate conductivity based on nernst-einstein relationship.
:return: dict of diffusion information
keys: D, D_components, specie, step_skip, temperature, msd, msd_component, dt, time_intervals_number
spec_dict
"""
d = {"diffusivity": self.diffusivity,
"diffusivity_components": self.diffusivity_components,
"specie": self.specie,
"step_skip": self.step_skip,
"temperature": self.temperature,
"msd": self.msd,
"msd_component": self.msd_component,
"dt": self.dt,
"time_intervals_number": self.time_intervals_number,
"spec_dict": self.spec_dict,
"drift_maximum": self.drift_maximum
}
oxi = False
if oxidized_specie:
df_sp = Specie.from_string(oxidized_specie)
oxi = True
else:
try:
df_sp = Specie.from_string(self.specie)
oxi = True
except:
pass
if oxi:
factor = get_conversion_factor(self.structure, df_sp, self.temperature)
d['conductivity'] = factor * self.diffusivity
d['conductivity_components'] = factor * self.diffusivity_components
d['conversion_factor'] = factor
d['oxidation_state'] = df_sp.oxi_state
return d
@classmethod
def from_structures(cls, structures, specie, temperature,
time_step, step_skip, time_intervals_number=1000,
spec_dict=None):
"""
Convenient constructor that takes in a list of Structure objects to
perform diffusion analysis.
:param structures ([Structure]): list of Structure objects:
:param specie (str): species string, like Li, Li+
:param temperature (float): temperature of MD
:param time_step (float): time step in MD
:param step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
:param time_intervals_number (int): number of time intervals. Default is 1000
means there are ~1000 time intervals.
:param spec_dict (dict): spec dict of linear fitting. Default is
{'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}
lower_bound is in unit of Angstrom square
upper_bound is in unit of total time. 0.5 means upper fitting bound is 0.5*t_total
minimum_msd_diff is in unit of Angstrom square. msd[upper_bound] - msd[lower_bound] should be larger
than minimum_msd_diff to do linear fitting.
"""
p = []
for i, s in enumerate(structures):
if i == 0:
structure = s
p.append(np.array(s.frac_coords)[:, None])
p.insert(0, p[0])
p = np.concatenate(p, axis=1)
dp = p[:, 1:] - p[:, :-1]
dp = dp - np.round(dp)
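# minimum-image convention: subtracting the rounded jump removes artificial
# displacements when an ion wraps across a periodic boundary; the cumulative sum
# below then reconstructs the unwrapped fractional trajectory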
f_disp = np.cumsum(dp, axis=1)
disp = structure.lattice.get_cartesian_coords(f_disp)
return cls(structure, disp, specie, temperature,
time_step, step_skip=step_skip,
time_intervals_number=time_intervals_number,
spec_dict=spec_dict)
@classmethod
def from_vaspruns(cls, vaspruns, specie,
time_intervals_number=1000,
spec_dict=None):
"""
Convenient constructor that takes in a list of Vasprun objects to
perform diffusion analysis.
:param vaspruns ([Vasprun]): List of Vaspruns (ordered):
:param specie (str): species string, like Li, Li+
:param time_intervals_number (int): number of time intervals. Default is 1000
means there are ~1000 time intervals.
:param spec_dict (dict): spec dict of linear fitting. Default is
{'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}
lower_bound is in unit of Angstrom square
upper_bound is in unit of total time. 0.5 means upper fitting bound is 0.5*t_total
minimum_msd_diff is in unit of Angstrom square. msd[upper_bound] - msd[lower_bound] should be larger
than minimum_msd_diff to do linear fitting.
"""
def get_structures(vaspruns):
for i, vr in enumerate(vaspruns):
if i == 0:
step_skip = vr.ionic_step_skip or 1
final_structure = vr.initial_structure
temperature = vr.parameters['TEEND']
time_step = vr.parameters['POTIM']
yield step_skip, temperature, time_step
# check that the runs are continuous
fdist = pbc_diff(vr.initial_structure.frac_coords,
final_structure.frac_coords)
if np.any(fdist > 0.001):
raise ValueError('initial and final structures do not '
'match.')
final_structure = vr.final_structure
assert (vr.ionic_step_skip or 1) == step_skip
for s in vr.ionic_steps:
yield s['structure']
s = get_structures(vaspruns)
step_skip, temperature, time_step = next(s)
return cls.from_structures(structures=s, specie=specie,
temperature=temperature, time_step=time_step, step_skip=step_skip,
time_intervals_number=time_intervals_number, spec_dict=spec_dict)
@classmethod
def from_files(cls, filepaths, specie, step_skip=10, ncores=None,
time_intervals_number=1000,
spec_dict=None):
"""
Convenient constructor that takes in a list of vasprun.xml paths to
perform diffusion analysis.
:param filepaths ([str]): List of paths to vasprun.xml files of runs, ordered.
:param specie (str): species string, like Li, Li+
:param step_skip (int): Sampling frequency of the displacements (
time_step is multiplied by this number to get the real time
between measurements)
:param ncores (int): Numbers of cores to use for multiprocessing. Can
speed up vasprun parsing considerably. Defaults to None,
which means serial. It should be noted that if you want to
use multiprocessing, the number of ionic steps in all vasprun
.xml files should be a multiple of the ionic_step_skip.
Otherwise, inconsistent results may arise. Serial mode has no
such restrictions.
:param time_intervals_number (int): number of time intervals. Default is 1000
means there are ~1000 time intervals.
:param spec_dict (dict): spec dict of linear fitting. Default is
{'lower_bound': 4.5, 'upper_bound': 0.5, 'minimum_msd_diff': 4.5}
lower_bound is in unit of Angstrom square
upper_bound is in unit of total time. 0.5 means upper fitting bound is 0.5*t_total
minimum_msd_diff is in unit of Angstrom square. msd[upper_bound] - msd[lower_bound] should be larger
than minimum_msd_diff to do linear fitting.
"""
if ncores is not None and len(filepaths) > 1:
import multiprocessing
p = multiprocessing.Pool(ncores)
vaspruns = p.imap(_get_vasprun,
[(fp, step_skip) for fp in filepaths])
analyzer = cls.from_vaspruns(vaspruns, specie=specie,
time_intervals_number=time_intervals_number,
spec_dict=spec_dict)
p.close()
p.join()
return analyzer
else:
def vr(filepaths):
offset = 0
for p in filepaths:
v = Vasprun(p, ionic_step_offset=offset,
ionic_step_skip=step_skip)
yield v
# Recompute offset.
offset = (-(v.nionic_steps - offset)) % step_skip
return cls.from_vaspruns(vr(filepaths), specie=specie,
time_intervals_number=time_intervals_number,
spec_dict=spec_dict)
def as_dict(self):
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"structure": self.structure.as_dict(),
"displacements": self.disp.tolist(),
"specie": self.specie,
"temperature": self.temperature,
"time_step": self.time_step,
"step_skip": self.step_skip,
"time_intervals_number": self.time_intervals_number,
"spec_dict": self.spec_dict}
@classmethod
def from_dict(cls, d):
structure = Structure.from_dict(d["structure"])
return cls(structure, np.array(d["displacements"]), specie=d["specie"],
temperature=d["temperature"], time_step=d["time_step"],
step_skip=d["step_skip"], time_intervals_number=d["time_intervals_number"],
spec_dict=d['spec_dict'])
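# A minimal usage sketch (hypothetical file paths; kept as a comment so the module
# stays import-safe):
# analyzer = DiffusivityAnalyzer.from_files(['run1/vasprun.xml', 'run2/vasprun.xml'],
#                                           specie='Li', step_skip=10)
# print(analyzer.get_summary_dict(oxidized_specie='Li+'))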
class ErrorAnalysisFromDiffusivityAnalyzer(object):
def __init__(self, diffusivity_analyzer, site_distance=3.0):
"""
Estimate the relative standard deviation (RSD) of D from the equation:
RSD = 3.43/sqrt(N_jump) + 0.04
:param diffusivity_analyzer (DiffusivityAnalyzer object):
:param site_distance (float): the average site distance between diffusion ions
"""
n_jump = len(diffusivity_analyzer.indices) * \
np.max(diffusivity_analyzer.msd) / (site_distance * site_distance)
n_jump_component = len(diffusivity_analyzer.indices) * \
np.max(diffusivity_analyzer.msd_component, axis=1) / (site_distance * site_distance)
RSD_D = 3.43 / np.sqrt(n_jump) + 0.04
RSD_D_component = [None, None, None]
for i in range(3):
RSD_D_component[i] = 3.43 / np.sqrt(n_jump_component[i]) + 0.04
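# e.g. n_jump = 100 gives RSD = 3.43 / 10 + 0.04 = 0.383, i.e. roughly 38% relative error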
self.diffusivity_analyzer = diffusivity_analyzer
self.n_jump = n_jump
self.n_jump_component = n_jump_component
self.RSD_D = RSD_D
self.RSD_D_component = np.array(RSD_D_component)
def get_summary_dict(self, oxidized_specie=None):
"""
A summary of information
:param oxidized_specie (str): specie string with oxidation state. If provided or specie in initial
function is oxidized, it will calculate conductivity based on nernst-einstein relationship.
:return: dict of diffusion information
"""
d = self.diffusivity_analyzer.get_summary_dict(oxidized_specie=oxidized_specie)
d['n_jump'] = self.n_jump
d['n_jump_component'] = self.n_jump_component
d['diffusivity_relative_standard_deviation'] = self.RSD_D
d['diffusivity_standard_deviation'] = self.RSD_D * d['diffusivity']
d['diffusivity_component_relative_standard_deviation'] = self.RSD_D_component
d['diffusivity_component_standard_deviation'] = self.RSD_D_component * d['diffusivity_components']
return d
def _get_vasprun(args):
"""
Internal method to support multiprocessing.
"""
return Vasprun(args[0], ionic_step_skip=args[1],
parse_dos=False, parse_eigen=False)
class ArreheniusAnalyzer(object):
def __init__(self, temperatures, diffusivities, diffusivity_errors=None):
"""
Fit an Arrhenius relationship from temperatures, diffusivities and diffusivity_errors
:param temperatures (List): list of temperatures
:param diffusivities (List): list of diffusivities at different temperatures
:param diffusivity_errors (List): optional, list of diffusivity error at different temperatures
"""
slope_to_eV = -8.617e-5 * 1000 * np.log(10)
def linear(x, k, b):
return k * x + b
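# Arrhenius form: log10(D) = log10(D0) - Ea / (k_B * T * ln(10)). With x = 1000 / T,
# the fitted slope k satisfies Ea = k * slope_to_eV (slope_to_eV bundles k_B, the
# factor of 1000, and ln(10), with a sign flip so Ea comes out positive).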
x = np.array([1000.0 / i for i in temperatures])
y = np.array([np.log10(i) for i in diffusivities])
import sys
sys.path.insert(1, '/home/ximo/Documents/GitHub/skforecast')
import pytest
from pytest import approx
import numpy as np
import pandas as pd
from skforecast import __version__
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
# Test initializations
#-------------------------------------------------------------------------------
def test_init_lags_attribute_when_integer_is_passed():
forecaster = ForecasterAutoreg(LinearRegression(), lags=10)
assert (forecaster.lags == np.arange(10) + 1).all()
def test_init_lags_attribute_when_list_is_passed():
forecaster = ForecasterAutoreg(LinearRegression(), lags=[1, 2, 3])
assert (forecaster.lags == np.array([1, 2, 3])).all()
def test_init_lags_attribute_when_range_is_passed():
forecaster = ForecasterAutoreg(LinearRegression(), lags=range(1, 4))
assert (forecaster.lags == np.array(range(1, 4))).all()
def test_init_lags_attribute_when_numpy_arange_is_passed():
forecaster = ForecasterAutoreg(LinearRegression(), lags=np.arange(1, 10))
assert (forecaster.lags == np.arange(1, 10)).all()
def test_init_exception_when_lags_argument_is_int_less_than_1():
with pytest.raises(Exception):
ForecasterAutoreg(LinearRegression(), lags=-10)
def test_init_exception_when_lags_argument_is_range_starting_at_zero():
with pytest.raises(Exception):
ForecasterAutoreg(LinearRegression(), lags=range(0, 4))
def test_init_exception_when_lags_argument_is_numpy_arange_starting_at_zero():
with pytest.raises(Exception):
ForecasterAutoreg(LinearRegression(), lags=np.arange(0, 4))
def test_init_exception_when_lags_argument_is_list_starting_at_zero():
with pytest.raises(Exception):
ForecasterAutoreg(LinearRegression(), lags=[0, 1, 2])
# Test method create_lags()
#-------------------------------------------------------------------------------
def test_create_lags_when_lags_is_3_and_y_is_numpy_arange_10():
'''
Check matrix of lags is created properly
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
results = forecaster.create_lags(y=np.arange(10))
expected = (np.array([[2., 1., 0.],
[3., 2., 1.],
[4., 3., 2.],
[5., 4., 3.],
[6., 5., 4.],
[7., 6., 5.],
[8., 7., 6.]]),
np.array([3., 4., 5., 6., 7., 8., 9.]))
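# row t of the lag matrix holds [y_{t-1}, y_{t-2}, y_{t-3}], paired with target y_t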
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
def test_create_lags_exception_when_len_of_y_is_less_than_maximum_lag():
forecaster = ForecasterAutoreg(LinearRegression(), lags=10)
with pytest.raises(Exception):
forecaster.create_lags(y=np.arange(5))
# Test method create_train_X_y()
#-------------------------------------------------------------------------------
def test_create_train_X_y_output_when_y_is_range_10_and_exog_is_None():
forecaster = ForecasterAutoreg(LinearRegression(), lags=5)
results = forecaster.create_train_X_y(y=np.arange(10))
expected = (np.array([[4., 3., 2., 1., 0.],
[5., 4., 3., 2., 1.],
[6., 5., 4., 3., 2.],
[7., 6., 5., 4., 3.],
[8., 7., 6., 5., 4.]]),
np.array([5, 6, 7, 8, 9]))
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
def test_create_train_X_y_output_when_y_is_range_10_and_exog_is_1d_array():
forecaster = ForecasterAutoreg(LinearRegression(), lags=5)
results = forecaster.create_train_X_y(y=np.arange(10), exog=np.arange(100, 110))
expected = (np.array([[4., 3., 2., 1., 0., 105.],
[5., 4., 3., 2., 1., 106.],
[6., 5., 4., 3., 2., 107.],
[7., 6., 5., 4., 3., 108.],
[8., 7., 6., 5., 4., 109.]]),
np.array([5, 6, 7, 8, 9]))
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
def test_create_train_X_y_output_when_y_is_range_10_and_exog_is_2d_array():
forecaster = ForecasterAutoreg(LinearRegression(), lags=5)
results = forecaster.create_train_X_y(
y=np.arange(10),
exog=np.column_stack([np.arange(100, 110), np.arange(1000, 1010)])
)
expected = (np.array([[4, 3, 2, 1, 0, 105, 1005],
[5, 4, 3, 2, 1, 106, 1006],
[6, 5, 4, 3, 2, 107, 1007],
[7, 6, 5, 4, 3, 108, 1008],
[8, 7, 6, 5, 4, 109, 1009]]),
np.array([5, 6, 7, 8, 9]))
assert (results[0] == expected[0]).all()
assert (results[1] == expected[1]).all()
# Test method fit()
#-------------------------------------------------------------------------------
def test_fit_exception_when_y_and_exog_have_different_lenght():
forecaster = ForecasterAutoreg(LinearRegression(), lags=5)
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50), exog=np.arange(10))
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50), exog=pd.Series(np.arange(10)))
def test_last_window_stored_when_fit_forecaster():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(50))
assert (forecaster.last_window == np.array([47, 48, 49])).all()
def test_in_sample_residuals_stored_when_fit_forecaster():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(5))
expected = np.array([0, 0])
results = forecaster.in_sample_residuals
assert results == approx(expected)
# Test method predict()
#-------------------------------------------------------------------------------
def test_predict_exception_when_steps_lower_than_1():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=0)
def test_predict_exception_when_forecaster_fited_without_exog_and_exog_passed_when_predict():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10, exog=np.arange(10))
def test_predict_exception_when_forecaster_fited_with_exog_but_not_exog_passed_when_predict():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(50), exog=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10)
def test_predict_exception_when_exog_passed_in_predict_has_different_columns_than_exog_used_to_fit_nparray():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(10), exog=np.arange(30).reshape(-1, 3))
with pytest.raises(Exception):
forecaster.predict(steps=10, exog=np.arange(30).reshape(-1, 2))
def test_predict_exception_when_exog_passed_in_predict_has_different_columns_than_exog_used_to_fit_pdDataDrame():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(10), exog=pd.DataFrame(np.arange(30).reshape(-1, 3)))
with pytest.raises(Exception):
forecaster.predict(steps=10, exog=pd.DataFrame(np.arange(30).reshape(-1, 2)))
def test_predict_exception_when_exog_lenght_is_less_than_steps():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(50), exog=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10, exog=np.arange(5))
def test_predict_exception_when_last_window_argument_is_not_numpy_array_or_pandas_series():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(50))
with pytest.raises(Exception):
forecaster.predict(steps=10, last_window=[1,2,3])
def test_predict_exception_when_last_window_lenght_is_less_than_maximum_lag():
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(50))
import os
import sys
import pickle
import copy
import numpy as np
import json
from skimage import io
from pathlib import Path
import torch
import spconv
from pcdet.utils import box_utils, common_utils
from pcdet.ops.roiaware_pool3d import roiaware_pool3d_utils
from pcdet.config import cfg
from pcdet.datasets.data_augmentation.dbsampler import DataBaseSampler
from pcdet.datasets import DatasetTemplate
from pcdet.datasets.cadc import cadc_calibration
class BaseCadcDataset(DatasetTemplate):
def __init__(self, root_path, split='train'):
super().__init__()
self.root_path = root_path
self.split = split
if split in ['train', 'val', 'test']:
split_dir = os.path.join(self.root_path, 'ImageSets', split + '.txt')
self.sample_id_list = [x.strip().split() for x in open(split_dir).readlines()] if os.path.exists(split_dir) else None
def set_split(self, split):
self.__init__(self.root_path, split)
def get_lidar(self, sample_idx):
date, set_num, idx = sample_idx
lidar_file = os.path.join(self.root_path, date, set_num, 'labeled', 'lidar_points', 'data', '%s.bin' % idx)
assert os.path.exists(lidar_file)
points = np.fromfile(lidar_file, dtype=np.float32).reshape(-1, 4)
points[:, 3] /= 255
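# intensities are stored as 0-255; normalize to the 0-1 range used downstream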
return points
def get_image_shape(self, sample_idx):
date, set_num, idx = sample_idx
img_file = os.path.join(self.root_path, date, set_num, 'labeled', 'image_00', 'data', '%s.png' % idx)
assert os.path.exists(img_file)
return np.array(io.imread(img_file).shape[:2], dtype=np.int32)
def get_label(self, sample_idx):
date, set_num, idx = sample_idx
label_file = os.path.join(self.root_path, date, set_num, '3d_ann.json')
assert os.path.exists(label_file)
return json.load(open(label_file, 'r'))
def get_calib(self, sample_idx):
date, set_num, idx = sample_idx
calib_path = os.path.join(self.root_path, date, 'calib')
assert os.path.exists(calib_path)
return cadc_calibration.Calibration(calib_path)
def get_road_plane(self, idx):
"""
plane_file = os.path.join(self.root_path, 'planes', '%s.txt' % idx)
with open(plane_file, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
# Ensure normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
"""
# Currently unsupported in CADC
raise NotImplementedError
def get_annotation_from_label(self, calib, sample_idx):
date, set_num, idx = sample_idx
obj_list = self.get_label(sample_idx)[int(idx)]['cuboids']
annotations = {}
annotations['name'] = np.array([obj['label'] for obj in obj_list])
annotations['num_points_in_gt'] = [[obj['points_count'] for obj in obj_list]]
loc_lidar = np.array([[obj['position']['x'],obj['position']['y'],obj['position']['z']] for obj in obj_list])
dims = np.array([[obj['dimensions']['x'],obj['dimensions']['y'],obj['dimensions']['z']] for obj in obj_list])
rots = np.array([obj['yaw'] for obj in obj_list])
gt_boxes_lidar = np.concatenate([loc_lidar, dims, rots[..., np.newaxis]], axis=1)
annotations['gt_boxes_lidar'] = gt_boxes_lidar
# in camera 0 frame. Probably meaningless as most objects aren't in frame.
annotations['location'] = calib.lidar_to_rect(loc_lidar)
annotations['rotation_y'] = rots
annotations['dimensions'] = np.array([[obj['dimensions']['y'], obj['dimensions']['z'], obj['dimensions']['x']] for obj in obj_list]) # lhw format
gt_boxes_camera = box_utils.boxes3d_lidar_to_camera(gt_boxes_lidar, calib)
# Currently unused for CADC; these fields don't make much sense here, as we primarily use 360 degree 3D LIDAR boxes.
annotations['score'] = np.array([1 for _ in obj_list])
annotations['difficulty'] = np.array([0 for obj in obj_list], np.int32)
annotations['truncated'] = np.array([0 for _ in obj_list])
annotations['occluded'] = np.array([0 for _ in obj_list])
annotations['alpha'] = np.array([-np.arctan2(-gt_boxes_lidar[i][1], gt_boxes_lidar[i][0]) + gt_boxes_camera[i][6] for i in range(len(obj_list))])
annotations['bbox'] = gt_boxes_camera
return annotations
@staticmethod
def get_fov_flag(pts_rect, img_shape, calib):
'''
Valid point should be in the image (and in the PC_AREA_SCOPE)
:param pts_rect:
:param img_shape:
:return:
'''
pts_img, pts_rect_depth = calib.rect_to_img(pts_rect)
val_flag_1 = np.logical_and(pts_img[:, 0] >= 0, pts_img[:, 0] < img_shape[1])
val_flag_2 = np.logical_and(pts_img[:, 1] >= 0, pts_img[:, 1] < img_shape[0])
val_flag_merge = np.logical_and(val_flag_1, val_flag_2)
pts_valid_flag = np.logical_and(val_flag_merge, pts_rect_depth >= 0)
return pts_valid_flag
def get_infos(self, num_workers=4, has_label=True, count_inside_pts=True, sample_id_list=None):
import concurrent.futures as futures
def process_single_scene(sample_idx):
print('%s sample_idx: %s ' % (self.split, sample_idx))
info = {}
pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
info['point_cloud'] = pc_info
image_info = {'image_idx': sample_idx, 'image_shape': self.get_image_shape(sample_idx)}
info['image'] = image_info
calib = self.get_calib(sample_idx)
calib_info = {'T_IMG_CAM0': calib.t_img_cam[0], 'T_CAM_LIDAR': calib.t_cam_lidar[0]}
info['calib'] = calib_info
if has_label:
annotations = self.get_annotation_from_label(calib, sample_idx)
info['annos'] = annotations
return info
# temp = process_single_scene(self.sample_id_list[0])
sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
with futures.ThreadPoolExecutor(num_workers) as executor:
infos = executor.map(process_single_scene, sample_id_list)
return list(infos)
def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
db_info_save_path = Path(self.root_path) / ('cadc_dbinfos_%s.pkl' % split)
database_save_path.mkdir(parents=True, exist_ok=True)
all_db_infos = {}
with open(info_path, 'rb') as f:
infos = pickle.load(f)
for k in range(len(infos)):
print('gt_database sample: %d/%d' % (k + 1, len(infos)))
info = infos[k]
sample_idx = info['point_cloud']['lidar_idx']
points = self.get_lidar(sample_idx)
annos = info['annos']
names = annos['name']
difficulty = annos['difficulty']
bbox = annos['bbox']
gt_boxes = annos['gt_boxes_lidar']
num_obj = gt_boxes.shape[0]
point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes[:,:7])
).numpy() # (nboxes, npoints)
for i in range(num_obj):
filename = '%s_%s_%s_%s_%d.bin' % (sample_idx[0], sample_idx[1], sample_idx[2], names[i], i)
filepath = database_save_path / filename
gt_points = points[point_indices[i] > 0]
gt_points[:, :3] -= gt_boxes[i, :3]
with open(filepath, 'w') as f:
gt_points.tofile(f)
if (used_classes is None) or names[i] in used_classes:
db_path = str(filepath.relative_to(self.root_path)) # gt_database/xxxxx.bin
db_info = {'name': names[i], 'path': db_path, 'image_idx': sample_idx, 'gt_idx': i,
'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0],
'difficulty': difficulty[i], 'bbox': bbox[i], 'score': annos['score'][i]}
if names[i] in all_db_infos:
all_db_infos[names[i]].append(db_info)
else:
all_db_infos[names[i]] = [db_info]
for k, v in all_db_infos.items():
print('Database %s: %d' % (k, len(v)))
with open(db_info_save_path, 'wb') as f:
pickle.dump(all_db_infos, f)
@staticmethod
def generate_prediction_dict(input_dict, index, record_dict):
# finally generate predictions.
sample_idx = input_dict['sample_idx'][index] if 'sample_idx' in input_dict else -1
boxes3d_lidar_preds = record_dict['boxes'].cpu().numpy()
if boxes3d_lidar_preds.shape[0] == 0:
return {'sample_idx': sample_idx}
calib = input_dict['calib'][index]
image_shape = input_dict['image_shape'][index]
boxes3d_camera_preds = box_utils.boxes3d_lidar_to_camera(boxes3d_lidar_preds, calib)
boxes2d_image_preds = box_utils.boxes3d_camera_to_imageboxes(boxes3d_camera_preds, calib,
image_shape=image_shape)
# predictions
predictions_dict = {
'bbox': boxes2d_image_preds,
'box3d_camera': boxes3d_camera_preds,
'box3d_lidar': boxes3d_lidar_preds,
'scores': record_dict['scores'].cpu().numpy(),
'label_preds': record_dict['labels'].cpu().numpy(),
'sample_idx': sample_idx,
}
return predictions_dict
@staticmethod
def generate_annotations(input_dict, pred_dicts, class_names, save_to_file=False, output_dir=None):
def get_empty_prediction():
ret_dict = {
'name': np.array([]),
import pandas as pd
import numpy as np
import glob
from neuropixels import generalephys as ephys
from neuropixels import generalephys_mua as ephys_mua
from neuropixels.generalephys import get_waveform_duration,get_waveform_PTratio,get_waveform_repolarizationslope,option234_positions
from scipy.cluster.vq import kmeans2
import seaborn as sns;sns.set_style("ticks")
import matplotlib.pyplot as plt
import h5py
import matplotlib.path as mpath
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
import os
def get_peak_waveform_from_template(template):
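# returns the template waveform on the channel with the largest absolute amplitude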
max = 0
ind=0
peak = np.zeros(np.shape(template.T)[1])  # length matches one channel's waveform
for i,wv in enumerate(template.T):
if np.max(np.abs(wv)) > max:
max = np.max(np.abs(wv))
ind = i
peak = wv
return peak
def df_from_phy_multimouse(folder,expnum='1',recnum='1',site_positions = option234_positions,**kwargs):
if 'est' not in folder:
base_folder = os.path.basename(folder)
cohort_ = os.path.basename(base_folder).split('_')[-2]
mouse_ = os.path.basename(base_folder).split('_')[-1]
#traverse down tree to data
if 'open-ephys-neuropix' in base_folder:
try:
rec_folder = glob.glob(folder+'/*')[0]
print(rec_folder, 'hey')
except:
print(base_folder)
return None
else:
rec_folder = folder
print(rec_folder)
raw_path = os.path.join(rec_folder,'recording'+str(recnum),'continuous')
if len(glob.glob(raw_path+'/*100.0*'))>0:
raw_path = glob.glob(raw_path+'/*100.0*')[0]
print('loading from '+raw_path)
else:
print('could not find data folder for '+raw_path)
if os.path.isfile(os.path.join(raw_path,'spike_clusters.npy')) :
# df = df_from_phy(raw_path,site_positions = ephys.option234_positions,cluster_file='KS2',cohort=cohort,mouse=mouse)
path = raw_path
units = ephys.load_phy_template(path,cluster_file='cluster_group',site_positions=site_positions)
#structures is a dictionary that defines the bounds of the structure e.g.:{'v1':(0,850), 'hpc':(850,2000)}
mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
index = []; count = 1; cohort = []
probe_id=[]
depth=[];#print(list(nwb_data.keys()));print(list(nwb_data['processing'].keys()));
for unit in list(units.keys()):
if 'probe' in kwargs.keys():
probe_id.extend([kwargs['probe']])
else:
probe_id.extend(['A'])
if 'mouse' in kwargs.keys():
mouse.extend([kwargs['mouse']])
else:
mouse.extend([mouse_])
if 'experiment' in kwargs.keys():
experiment.extend([kwargs['experiment']])
else:
experiment.extend(['placeholder'])
if 'cohort' in kwargs.keys():
cohort.extend([kwargs['cohort']])
else:
cohort.extend([cohort_])
xpos.extend([units[unit]['xpos']])
ypos.extend([units[unit]['ypos']])
template.extend([units[unit]['template']])
times.append(units[unit]['times'])
waveform.append(units[unit]['waveform_weights'])
df = pd.DataFrame(index=np.arange(len(mouse)))  # one row per unit
df = df.fillna(np.nan)
# df['nwb_id'] = nwb_id
df['mouse'] = mouse
df['experiment'] = experiment
df['probe'] = probe_id
# df['structure'] = structure
df['cell'] = units.keys()
df['cohort'] = cohort
df['times'] = times
df['ypos'] = ypos
df['xpos'] = xpos
# df['depth'] = depth
df['waveform'] = waveform
df['template'] = template
return df
def df_from_phy(folder,expnum='1',recnum='1',site_positions = option234_positions,**kwargs):
# if 'est' not in folder:
# base_folder = os.path.basename(folder)
# cohort_ = os.path.basename(base_folder).split('_')[-2]
# mouse_ = os.path.basename(base_folder).split('_')[-1]
#traverse down tree to data
# if 'open-ephys-neuropix' in base_folder:
# try:
# rec_folder = glob.glob(folder+'/*')[0]
# except:
# print(base_folder)
# return None
# else:
# rec_folder = folder
# raw_path = os.path.join(rec_folder,'experiment'+str(expnum),'recording'+str(recnum),'continuous')
# if len(glob.glob(raw_path+'/*100.0*'))>0:
# raw_path = glob.glob(raw_path+'/*100.0*')[0]
# print('loading from '+raw_path)
# else:
# print('could not find data folder for '+raw_path)
raw_path=folder
if 'cohort' in kwargs.keys():
cohort = kwargs['cohort']
else:
cohort = None
if 'mouse' in kwargs.keys():
mouse = kwargs['mouse']
else:
mouse = None
# df = df_from_phy(raw_path,site_positions = ephys.option234_positions,cluster_file='KS2',cohort=cohort,mouse=mouse)
path = raw_path
#units = ephys.load_phy_template(path,cluster_file='KS2',site_positions=site_positions)
units = ephys_mua.load_phy_template_mua(path,site_positions=site_positions)
#structures is a dictionary that defines the bounds of the structure e.g.:{'v1':(0,850), 'hpc':(850,2000)}
mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
index = []; count = 1; cohort = []
probe_id=[]
depth=[];#print(list(nwb_data.keys()));print(list(nwb_data['processing'].keys()));
for unit in list(units.index):
if 'probe' in kwargs.keys():
probe_id.extend([kwargs['probe']])
else:
probe_id.extend(['A'])
if 'mouse' in kwargs.keys():
mouse.extend([kwargs['mouse']])
else:
mouse.extend([mouse_])
if 'experiment' in kwargs.keys():
experiment.extend([kwargs['experiment']])
else:
experiment.extend(['placeholder'])
if 'cohort' in kwargs.keys():
cohort.extend([kwargs['cohort']])
else:
cohort.extend([cohort_])
df = units
df['mouse'] = mouse
df['experiment'] = experiment
df['probe'] = probe_id
# df['structure'] = structure
df['cell'] = units.index
df['cohort'] = cohort
df['times'] = units['times']
df['ypos'] = units['ypos']
df['xpos'] = units['xpos']
# df['depth'] = xpos
df['waveform'] = units['waveform_weights']
df['template'] = units['template']
return df
def df_from_nwb(nwb_data,structures=None,insertion_angle=55,nwbid=0):
    if type(nwb_data) == str:
        #print(nwb_data)
        nwbid = nwb_data
        nwb_data = h5py.File(nwb_data, 'r')
#structures is a dictionary that defines the bounds of the structure e.g.:{'v1':(0,850), 'hpc':(850,2000)}
mouse = [];experiment=[];cell = [];ypos = [];xpos = [];waveform=[];template=[];structure=[];times=[]
index = []; count = 1
nwb_id = [];probe_id=[]
depth=[];#print(list(nwb_data.keys()));print(list(nwb_data['processing'].keys()));
if 'processing' in nwb_data.keys():
for probe in list(nwb_data['processing'].keys()):
if 'UnitTimes' in list(nwb_data['processing'][probe].keys()):
for i,u in enumerate(list(nwb_data['processing'][probe]['UnitTimes'].keys())):
if u != 'unit_list':
nwb_id.append(nwbid)
probe_id.append(probe)
index.append(count);count+=1
mouse.append(str(np.array(nwb_data.get('identifier'))))
experiment.append(1)
cell.append(u)
times.append(np.array(nwb_data['processing'][probe]['UnitTimes'][u]['times']));# print(list(nwb_data['processing'][probe]['UnitTimes'][u].keys()))
if 'ypos' in list(nwb_data['processing'][probe]['UnitTimes'][u].keys()):
                            ypos.append(np.array(nwb_data['processing'][probe]['UnitTimes'][u]['ypos']))
import numpy as np
from scipy.optimize import least_squares
from scipy.integrate import odeint
def sol_u(t, u0, alpha, beta):
return u0 * np.exp(-beta * t) + alpha / beta * (1 - np.exp(-beta * t))
def sol_s(t, s0, u0, alpha, beta, gamma):
exp_gt = np.exp(-gamma * t)
if beta == gamma:
s = s0 * exp_gt + (beta * u0 - alpha) * t * exp_gt + alpha / gamma * (1 - exp_gt)
else:
s = (
s0 * exp_gt
+ alpha / gamma * (1 - exp_gt)
+ (alpha - u0 * beta) / (gamma - beta) * (exp_gt - np.exp(-beta * t))
)
return s
def sol_p(t, p0, s0, u0, alpha, beta, gamma, eta, gamma_p):
u = sol_u(t, u0, alpha, beta)
s = sol_s(t, s0, u0, alpha, beta, gamma)
exp_gt = np.exp(-gamma_p * t)
p = p0 * exp_gt + eta / (gamma_p - gamma) * (
s - s0 * exp_gt - beta / (gamma_p - beta) * (u - u0 * exp_gt - alpha / gamma_p * (1 - exp_gt))
)
return p, s, u
def sol_ode(x, t, alpha, beta, gamma, eta, gamma_p):
dx = np.zeros(x.shape)
dx[0] = alpha - beta * x[0]
dx[1] = beta * x[0] - gamma * x[1]
dx[2] = eta * x[1] - gamma_p * x[2]
return dx
def sol_num(t, p0, s0, u0, alpha, beta, gamma, eta, gamma_p):
sol = odeint(
lambda x, t: sol_ode(x, t, alpha, beta, gamma, eta, gamma_p),
np.array([u0, s0, p0]),
t,
)
return sol
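
# Hypothetical sanity check (ours, not part of the original module): the
# numerical ODE solution from sol_num should agree with the closed-form
# expressions above when beta != gamma and gamma_p differs from both.
def _check_sol_num_against_analytic():
    t = np.linspace(0, 5, 50)
    p0, s0, u0 = 0.1, 0.5, 1.0
    alpha, beta, gamma, eta, gamma_p = 1.0, 0.5, 0.3, 0.2, 0.1
    num = sol_num(t, p0, s0, u0, alpha, beta, gamma, eta, gamma_p)
    p, s, u = sol_p(t, p0, s0, u0, alpha, beta, gamma, eta, gamma_p)
    # columns of the numerical solution are ordered [u, s, p]
    assert np.allclose(num[:, 0], u, atol=1e-4)
    assert np.allclose(num[:, 1], s, atol=1e-4)
    assert np.allclose(num[:, 2], p, atol=1e-4)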
def fit_gamma_labelling(t, l, mode=None, lbound=None):
t = np.array(t, dtype=float)
l = np.array(l, dtype=float)
if l.ndim == 1:
# l is a vector
n_rep = 1
else:
n_rep = l.shape[0]
t = np.tile(t, n_rep)
l = l.flatten()
# remove low counts based on lbound
if lbound is not None:
t[l < lbound] = np.nan
l[l < lbound] = np.nan
n = np.sum(~np.isnan(t))
tau = t - np.nanmin(t)
tm = np.nanmean(tau)
# prepare y
y = np.log(l)
ym = np.nanmean(y)
# calculate slope
var_t = np.nanmean(tau ** 2) - tm ** 2
cov = np.nansum(y * tau) / n - ym * tm
k = cov / var_t
# calculate intercept
b = np.exp(ym - k * tm) if mode != "fast" else None
gamma = -k
u0 = b
return gamma, u0
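
# Hypothetical usage sketch (ours): with alpha = 0, sol_u reduces to pure
# exponential decay, and fit_gamma_labelling should recover the decay rate.
def _demo_fit_gamma_labelling():
    t = np.array([0.0, 1.0, 2.0, 4.0])
    l = sol_u(t, u0=2.0, alpha=0.0, beta=0.25)  # pure decay at rate 0.25
    gamma, u0 = fit_gamma_labelling(t, l)
    return gamma, u0  # approximately (0.25, 2.0)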
def fit_beta_lsq(t, l, bounds=(0, np.inf), fix_l0=False, beta_0=None):
tau = t - np.min(t)
l0 = np.mean(l[:, tau == 0])
if beta_0 is None:
beta_0 = 1
if fix_l0:
f_lsq = lambda b: (sol_u(tau, l0, 0, b) - l).flatten()
ret = least_squares(f_lsq, beta_0, bounds=bounds)
beta = ret.x
else:
f_lsq = lambda p: (sol_u(tau, p[1], 0, p[0]) - l).flatten()
ret = least_squares(f_lsq, np.array([beta_0, l0]), bounds=bounds)
beta = ret.x[0]
l0 = ret.x[1]
return beta, l0
def fit_alpha_labelling(t, u, gamma, mode=None):
n = u.size
tau = t - np.min(t)
    expt = np.exp(gamma * tau)
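    # --- the remainder of this function is missing from the source; the lines
    # below are a sketch by analogy with fit_gamma_labelling above, using that
    # u(t) * exp(gamma * tau) is linear in (exp(gamma * tau) - 1) with slope
    # alpha / gamma and intercept u0 ---
    x = expt - 1
    xm = np.mean(x)
    y = u * expt
    ym = np.mean(y)
    # least-squares slope and intercept
    var_x = np.mean(x ** 2) - xm ** 2
    cov = np.sum(y * x) / n - ym * xm
    k = cov / var_x
    b = ym - k * xm if mode != "fast" else None  # estimated u0
    alpha = k * gamma
    return alpha, b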
# This is python script for Metashape Pro. Scripts repository: https://github.com/agisoft-llc/metashape-scripts
#
# See https://agisoft.freshdesk.com/support/solutions/articles/31000162552-automatic-detection-of-objects-on-orthomosaic
# Based on https://github.com/weecology/DeepForest (tested on deepforest==1.0.8)
#
# This is a neural network assistant for object (trees/cars/sea lions/etc.) detection on an orthomosaic.
# This script can help you to detect trees or other objects (like cars or sea lions)
# using partial manual annotations to guide the neural network on what to look for.
# The network was pre-trained for the tree detection task, but the results are much better
# if you annotate a small region to guide the neural network (it will be trained additionally).
# Note that you need an NVIDIA GPU for fast processing (i.e. a CUDA-compatible GPU is required; CPU is supported too, but it is very slow)
#
# How to install (Linux):
#
# 1. cd .../metashape-pro
# LD_LIBRARY_PATH=`pwd`/python/lib/ python/bin/python3.8 -m pip install albumentations==1.0.3 deepforest pytorch-lightning==1.5.10 torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
# 2. Add this script to auto-launch - https://agisoft.freshdesk.com/support/solutions/articles/31000133123-how-to-run-python-script-automatically-on-metashape-professional-start
# copy detect_objects.py script to /home/<username>/.local/share/Agisoft/Metashape Pro/scripts/
#
# How to install (Windows):
#
# 1. Download latest gdal, rasterio and fiona packages (for Python 3.8 amd64, i.e. download files ending with ...cp38-cp38-win_amd64.whl)
# gdal - https://www.lfd.uci.edu/~gohlke/pythonlibs/#gdal
# rasterio - https://www.lfd.uci.edu/~gohlke/pythonlibs/#rasterio
# fiona - https://www.lfd.uci.edu/~gohlke/pythonlibs/#fiona
# 2. Now you need to install these downloaded python packages wheel:
# 3. Launch cmd.exe with the administrator privileges
# 4. Change directory to Downloads directory (where three downloaded files are located):
# cd %USERPROFILE%/Downloads
# 5. Install them one by one:
# "%programfiles%\Agisoft\Metashape Pro\python\python.exe" -m pip install GDAL‑3.4.2‑cp38‑cp38‑win_amd64.whl
# "%programfiles%\Agisoft\Metashape Pro\python\python.exe" -m pip install rasterio‑1.2.10‑cp38‑cp38‑win_amd64.whl
# "%programfiles%\Agisoft\Metashape Pro\python\python.exe" -m pip install Fiona‑1.8.21‑cp38‑cp38‑win_amd64.whl
# 6. Install pytorch with CUDA support and deepforest:
# "%programfiles%\Agisoft\Metashape Pro\python\python.exe" -m pip install albumentations==1.0.3 deepforest pytorch-lightning==1.5.10 torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio===0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
# 7. Add this script to auto-launch - https://agisoft.freshdesk.com/support/solutions/articles/31000133123-how-to-run-python-script-automatically-on-metashape-professional-start
# copy detect_objects.py script to C:/Users/<username>/AppData/Local/Agisoft/Metashape Pro/scripts/
#
# How to use:
#
# 1. Open a dataset with an orthomosaic with at least 10 cm/pix resolution (i.e. GSD should be <= 10 cm/pix), 10 cm/pix or 5 cm/pix are recommended
# 2. Create a shape layer 'Train zones' with at least one axis-aligned bounding box (using 'Draw Rectangle') specifying the training zone
#    (each of its sides should be around 50-60 meters); it is recommended to specify a color for shapes of this layer - red for example
# 3. Create a shape layer 'Train data' with all trees (or all cars) in train zones specified as axis-aligned bounding boxes (using 'Draw Rectangle');
#    it is recommended to specify a different color for shapes of this layer - blue for example
# 4. Ensure that you didn't miss any objects (trees if you want to detect trees, or cars if you want to detect cars) in train zones
# 5. Press 'Custom menu/Detect objects'
# 6. Ensure that proper shape layers are selected as Train zones and Train data
# 7. Press Run
#
# To process detection only on some part of the orthomosaic please specify Outer Boundary
# (ensure that Train zones are inside the Outer Boundary):
# https://www.agisoft.com/forum/index.php?topic=4910.msg24580#msg24580
#
# How to use a pre-trained neural network model for tree detection:
# (not recommended because results are much better after training on an annotated 50x50 meter zone)
#
# 1. Open a dataset with an orthomosaic with at least 10 cm/pix resolution (i.e. GSD should be <= 10 cm/pix)
# 2. Press 'Custom menu/Detect objects'
# 3. Press Run
#
# If you will encounter error like this:
# Downloading: "https://download.pytorch.org/models/retinanet_resnet50_fpn_coco-eeacb38b.pth" to C:\Users\<username>/.cache\torch\hub\checkpoints\retinanet_resnet50_fpn_coco-eeacb38b.pth
# Traceback (most recent call last):
# File "C:\Program Files\Agisoft\Metashape Pro\python\lib\urllib\request.py", line 1354, in do_open
# h.request(req.get_method(), req.selector, req.data, headers,
# ...
# File "C:\Program Files\Agisoft\Metashape Pro\python\lib\socket.py", line 918, in getaddrinfo
# for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
# socket.gaierror: [Errno 11002] getaddrinfo failed
# During handling of the above exception, another exception occurred:
# ...
# Then you need to manually download the file at the URL from the beginning of the error message (similar to https://download.pytorch.org/models/retinanet_resnet50_fpn_coco-eeacb38b.pth)
# to the directory specified in the same line (C:\Users\<username>\.cache\torch\hub\checkpoints\)
import Metashape
import pathlib, shutil, os, time
from PySide2 import QtGui, QtCore, QtWidgets
# Checking compatibility
compatible_major_version = "1.8"
found_major_version = ".".join(Metashape.app.version.split('.')[:2])
if found_major_version != compatible_major_version:
raise Exception("Incompatible Metashape version: {} != {}".format(found_major_version, compatible_major_version))
def pandas_append(df, row, ignore_index=False):
import pandas as pd
if isinstance(row, pd.DataFrame):
result = pd.concat([df, row], ignore_index=ignore_index)
elif isinstance(row, pd.core.series.Series):
result = pd.concat([df, row.to_frame().T], ignore_index=ignore_index)
elif isinstance(row, dict):
result = pd.concat([df, pd.DataFrame(row, index=[0], columns=df.columns)])
else:
raise RuntimeError("pandas_append: unsupported row type - {}".format(type(row)))
return result
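
# Hypothetical usage sketch (ours, not part of the original script):
# pandas_append stands in for the DataFrame.append API removed in pandas 2.x.
def _demo_pandas_append():
    import pandas as pd
    df = pd.DataFrame(columns=['image_path', 'xmin', 'ymin', 'xmax', 'ymax', 'label'])
    row = {'image_path': 'tile.jpg', 'xmin': 0, 'ymin': 0, 'xmax': 10, 'ymax': 10, 'label': 'Tree'}
    return pandas_append(df, row, ignore_index=True)  # one-row DataFrame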
class DetectObjectsDlg(QtWidgets.QDialog):
def __init__(self, parent):
self.force_small_patch_size = False
# Set force_small_patch_size to True if you want to train on small zones with a lot of small objects (train zone still should be at least 400*orthomosaic_resolution)
# For example, with force_small_patch_size=True and orthomosaic resolution=5cm - train zone should be >= 20x20 m. If orthomosaic=2.5cm - train zone should be >= 10x10 m.
# Note that this can work only if:
# 1) You have very small objects
# 2) Train zone contains a lot of objects
self.augment_colors = True
        # Set augment_colors to False if you have bad results and you want to force the neural network to take the color of objects into account
self.expected_layer_name_train_zones = "Train zone"
self.expected_layer_name_train_data = "Train data"
self.layer_name_detection_data = "Detected data"
if len(Metashape.app.document.path) > 0:
self.working_dir = str(pathlib.Path(Metashape.app.document.path).parent / "objects_detection")
else:
self.working_dir = ""
self.save_model_path = ""
self.load_model_path = self.readModelLoadPathFromSettings()
self.cleanup_working_dir = False
self.debug_tiles = False
self.train_on_user_data_enabled = False
self.max_epochs = 20 # bigger number of epochs leads to better neural network training (but slower)
self.data_augmentation_multiplier = 8 # from 1 to 8, bigger multiplier leads to better neural network training (but slower)
self.preferred_patch_size = 400 # 400 pixels
self.preferred_resolution = 0.10 # 10 cm/pix
self.prefer_original_resolution = True
self.use_neural_network_pretrained_on_birds = False
self.tiles_without_annotations_supported = False # See https://github.com/weecology/DeepForest/issues/216
QtWidgets.QDialog.__init__(self, parent)
self.setWindowTitle("Objects detection on orthomosaic")
self.chunk = Metashape.app.document.chunk
self.create_gui()
self.exec()
def stop(self):
self.stopped = True
def check_stopped(self):
if self.stopped:
raise InterruptedError("Stop was pressed")
def process(self):
try:
self.stopped = False
self.btnRun.setEnabled(False)
self.btnStop.setEnabled(True)
time_start = time.time()
self.load_params()
            self.prepare()
print("Script started...")
self.create_neural_network()
self.export_orthomosaic()
if self.chunk.shapes is None:
self.chunk.shapes = Metashape.Shapes()
if self.train_on_user_data_enabled:
self.train_on_user_data()
if len(self.save_model_path) > 0:
self.saveToSettingsModelLoadPath(self.save_model_path)
else:
self.saveToSettingsModelLoadPath(self.load_model_path)
self.detect()
self.results_time_total = time.time() - time_start
self.show_results_dialog()
except:
if self.stopped:
Metashape.app.messageBox("Processing was stopped.")
else:
Metashape.app.messageBox("Something gone wrong.\n"
"Please check the console.")
raise
finally:
if self.cleanup_working_dir:
shutil.rmtree(self.working_dir, ignore_errors=True)
self.reject()
print("Script finished.")
return True
    def prepare(self):
import os, sys, multiprocessing
import random, string
if self.working_dir == "":
raise Exception("You should specify working directory (or save .psx project)")
print("Working dir: {}".format(self.working_dir))
try:
os.mkdir(self.working_dir)
except FileExistsError:
already_existing_working_dir = self.working_dir
random_suffix = ''.join(random.choices(string.ascii_uppercase + string.digits, k=8))
self.working_dir = self.working_dir + "/tmp_" + random_suffix
print("Working dir: {} already exists, trying instead: {}".format(already_existing_working_dir, self.working_dir))
try:
os.mkdir(self.working_dir)
except FileExistsError:
raise Exception("Working directory {} already exists! Please specify another working dir.".format(self.working_dir))
self.cleanup_working_dir = True
self.dir_tiles = self.working_dir + "/tiles/"
self.dir_train_data = self.working_dir + "/train/"
self.dir_train_subtiles = self.dir_train_data + "inner/"
self.dir_train_subtiles_debug = self.dir_train_subtiles + "debug/"
self.dir_detection_results = self.working_dir + "/detection/"
self.dir_subtiles_results = self.dir_detection_results + "inner/"
for subdir in [self.dir_tiles, self.dir_train_data, self.dir_train_subtiles, self.dir_train_subtiles_debug, self.dir_detection_results, self.dir_subtiles_results]:
shutil.rmtree(subdir, ignore_errors=True)
os.mkdir(subdir)
import torch
torch_hub_dir = torch.hub.get_dir()
from deepforest import utilities
if not hasattr(utilities, '__models_dir_path_already_patched__'):
original_use_release = utilities.use_release
original_use_bird_release = utilities.use_bird_release
# This is a workaround for Windows permission issues (we can't easily download files into .../site-packages/deepforest/...)
def patched_use_release(**kwargs):
kwargs["save_dir"] = torch_hub_dir
return original_use_release(**kwargs)
def patched_use_bird_release(**kwargs):
kwargs["save_dir"] = torch_hub_dir
return original_use_bird_release(**kwargs)
utilities.use_release = patched_use_release
utilities.use_bird_release = patched_use_bird_release
utilities.__models_dir_path_already_patched__ = True
if os.name == 'nt': # if Windows
multiprocessing.set_executable(os.path.join(sys.exec_prefix, 'python.exe'))
def create_neural_network(self):
print("Neural network loading...")
import torch
import deepforest
import deepforest.main
self.m = deepforest.main.deepforest()
if len(self.load_model_path) > 0:
self.m.use_release()
print("Using the neural network loaded from '{}'...".format(self.load_model_path))
self.m.model = torch.load(self.load_model_path)
else:
if self.use_neural_network_pretrained_on_birds:
# use neural network pre-trained on birds
print("Using the neural network pre-trained on birds...")
self.m.use_bird_release()
else:
# use neural network pre-trained on trees
print("Using the neural network pre-trained on trees...")
self.m.use_release()
def export_orthomosaic(self):
import numpy as np
print("Prepairing orthomosaic...")
kwargs = {}
if not self.prefer_original_resolution and (self.chunk.orthomosaic.resolution < self.preferred_resolution*0.90):
kwargs["resolution"] = self.preferred_resolution
else:
print("no resolution downscaling required")
self.chunk.exportRaster(path=self.dir_tiles + "tile.jpg", source_data=Metashape.OrthomosaicData, image_format=Metashape.ImageFormat.ImageFormatJPEG, save_alpha=False, white_background=True,
save_world=True,
split_in_blocks=True, block_width=self.patch_size, block_height=self.patch_size,
**kwargs)
tiles = os.listdir(self.dir_tiles)
self.tiles_paths = {}
self.tiles_to_world = {}
for tile in sorted(tiles):
assert tile.startswith("tile-")
_, tile_x, tile_y = tile.split(".")[0].split("-")
tile_x, tile_y = map(int, [tile_x, tile_y])
if tile.endswith(".jgw") or tile.endswith(".pgw"): # https://en.wikipedia.org/wiki/World_file
with open(self.dir_tiles + tile, "r") as file:
matrix2x3 = list(map(float, file.readlines()))
matrix2x3 = np.array(matrix2x3).reshape(3, 2).T
self.tiles_to_world[tile_x, tile_y] = matrix2x3
else:
self.tiles_paths[tile_x, tile_y] = self.dir_tiles + tile
assert(len(self.tiles_paths) == len(self.tiles_to_world))
assert(self.tiles_paths.keys() == self.tiles_to_world.keys())
self.tile_min_x = min([key[0] for key in self.tiles_paths.keys()])
self.tile_max_x = max([key[0] for key in self.tiles_paths.keys()])
self.tile_min_y = min([key[1] for key in self.tiles_paths.keys()])
self.tile_max_y = max([key[1] for key in self.tiles_paths.keys()])
print("{} tiles, tile_x in [{}; {}], tile_y in [{}; {}]".format(len(self.tiles_paths), self.tile_min_x, self.tile_max_x, self.tile_min_y, self.tile_max_y))
def train_on_user_data(self):
import sys
import cv2
import random
import numpy as np
import pandas as pd
import multiprocessing
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import Callback
random.seed(2391231231324531)
app = QtWidgets.QApplication.instance()
training_start = time.time()
print("Neural network additional training on user data...")
self.train_zones_on_ortho = []
n_train_zone_shapes_out_of_orthomosaic = 0
for zone_i, shape in enumerate(self.train_zones):
assert(len(shape.geometry.coordinates) == 1)
shape_vertices = shape.geometry.coordinates[0]
zone_from_world = None
zone_from_world_best = None
for tile_x in range(self.tile_min_x, self.tile_max_x):
for tile_y in range(self.tile_min_y, self.tile_max_y):
if (tile_x, tile_y) not in self.tiles_paths:
continue
to_world = self.tiles_to_world[tile_x, tile_y]
from_world = self.invert_matrix_2x3(to_world)
for p in shape_vertices:
p = Metashape.CoordinateSystem.transform(p, self.chunk.shapes.crs, self.chunk.orthomosaic.crs)
p_in_tile = from_world @ [p.x, p.y, 1]
distance2_to_tile_center = np.linalg.norm(p_in_tile - [self.patch_size/2, self.patch_size/2])
if zone_from_world_best is None or distance2_to_tile_center < zone_from_world_best:
zone_from_world_best = distance2_to_tile_center
zone_from_world = self.invert_matrix_2x3(self.add_pixel_shift(to_world, -tile_x * self.patch_size, -tile_y * self.patch_size))
if zone_from_world_best > 1.1 * (self.patch_size / 2)**2:
n_train_zone_shapes_out_of_orthomosaic += 1
zone_from = None
zone_to = None
for p in shape_vertices:
p = Metashape.CoordinateSystem.transform(p, self.chunk.shapes.crs, self.chunk.orthomosaic.crs)
p_in_ortho = np.int32(np.round(zone_from_world @ [p.x, p.y, 1]))
if zone_from is None:
zone_from = p_in_ortho
if zone_to is None:
zone_to = p_in_ortho
zone_from = np.minimum(zone_from, p_in_ortho)
zone_to = np.maximum(zone_to, p_in_ortho)
train_size = zone_to - zone_from
train_size_m = np.int32(np.round(train_size * self.orthomosaic_resolution))
if np.any(train_size < self.patch_size):
print("Train zone #{} {}x{} pixels ({}x{} meters) is too small - each side should be at least {} meters"
.format(zone_i + 1, train_size[0], train_size[1], train_size_m[0], train_size_m[1], self.patch_size * self.orthomosaic_resolution), file=sys.stderr)
self.train_zones_on_ortho.append(None)
else:
print("Train zone #{}: {}x{} orthomosaic pixels, {}x{} meters".format(zone_i + 1, train_size[0], train_size[1], train_size_m[0], train_size_m[1]))
self.train_zones_on_ortho.append((zone_from, zone_to, zone_from_world))
assert len(self.train_zones_on_ortho) == len(self.train_zones)
if n_train_zone_shapes_out_of_orthomosaic > 0:
print("Warning, {} train zones shapes are out of orthomosaic".format(n_train_zone_shapes_out_of_orthomosaic))
area_threshold = 0.3
all_annotations = pd.DataFrame(columns=['image_path', 'xmin', 'ymin', 'xmax', 'ymax', 'label'])
nannotated_tiles = 0
if self.tiles_without_annotations_supported:
empty_tile_name = "empty_tile.jpg"
empty_tile = self.create_empty_tile()
cv2.imwrite(self.dir_train_subtiles + empty_tile_name, empty_tile)
# See https://github.com/weecology/DeepForest/issues/216
all_annotations = pandas_append(all_annotations, {'image_path': empty_tile_name, 'xmin': '0', 'ymin': '0', 'xmax': '0', 'ymax': '0', 'label': 'Tree'}, ignore_index=True)
nempty_tiles = 0
self.train_nannotations_in_zones = 0
for zone_i, shape in enumerate(self.train_zones):
if self.train_zones_on_ortho[zone_i] is None:
continue
zone_from, zone_to, zone_from_world = self.train_zones_on_ortho[zone_i]
annotations = []
for annotation in self.train_data:
assert(len(annotation.geometry.coordinates) == 1)
annotation_vertices = annotation.geometry.coordinates[0]
annotation_from = None
annotation_to = None
for p in annotation_vertices:
p = Metashape.CoordinateSystem.transform(p, self.chunk.shapes.crs, self.chunk.orthomosaic.crs)
p_in_ortho = np.int32(np.round(zone_from_world @ [p.x, p.y, 1]))
if annotation_from is None:
annotation_from = p_in_ortho
if annotation_to is None:
annotation_to = p_in_ortho
annotation_from = np.minimum(annotation_from, p_in_ortho)
annotation_to = np.maximum(annotation_to, p_in_ortho)
bbox_from, bbox_to = self.intersect(zone_from, zone_to, annotation_from, annotation_to)
if self.area(bbox_from, bbox_to) > self.area(annotation_from, annotation_to) * area_threshold:
annotations.append((annotation_from, annotation_to))
self.train_nannotations_in_zones += len(annotations)
print("Train zone #{}: {} annotations inside".format(zone_i + 1, len(annotations)))
border = self.patch_inner_border
            inner_patch_size = self.patch_size - 2 * border
            zone_size = zone_to - zone_from
            assert np.all(zone_size >= self.patch_size)
            nx_tiles, ny_tiles = np.int32((zone_size - 2 * border + inner_patch_size - 1) // inner_patch_size)
assert nx_tiles >= 1 and ny_tiles >= 1
xy_step = np.int32(np.round((zone_size + [nx_tiles, ny_tiles] - 1) // [nx_tiles, ny_tiles]))
out_of_orthomosaic_train_tile = 0
for x_tile in range(0, nx_tiles):
for y_tile in range(0, ny_tiles):
tile_to = zone_from + self.patch_size + xy_step * [x_tile, y_tile]
if x_tile == nx_tiles - 1 and y_tile == ny_tiles - 1:
assert np.all(tile_to >= zone_to)
tile_to = np.minimum(tile_to, zone_to)
tile_from = tile_to - self.patch_size
if x_tile == 0 and y_tile == 0:
assert np.all(tile_from == zone_from)
assert np.all(tile_from >= zone_from)
tile = self.read_part(tile_from, tile_to)
assert tile.shape == (self.patch_size, self.patch_size, 3)
if np.all(tile == 255):
out_of_orthomosaic_train_tile += 1
continue
tile_annotations = []
for annotation_from, annotation_to in annotations:
bbox_from, bbox_to = self.intersect(tile_from, tile_to, annotation_from, annotation_to)
if self.area(bbox_from, bbox_to) > self.area(annotation_from, annotation_to) * area_threshold:
tile_annotations.append((bbox_from - tile_from, bbox_to - tile_from))
max_augmented_versions = 8
all_augmented_versions = list(range(max_augmented_versions))
augmented_versions = []
augmented_versions_to_add = max(1, self.data_augmentation_multiplier)
while augmented_versions_to_add > 0:
if augmented_versions_to_add < max_augmented_versions:
                            shuffled_augmented_versions = list(all_augmented_versions)  # copy, so the master list is not shuffled in place
random.shuffle(shuffled_augmented_versions)
augmented_versions.extend(shuffled_augmented_versions[:augmented_versions_to_add])
augmented_versions_to_add = 0
else:
augmented_versions.extend(all_augmented_versions)
augmented_versions_to_add -= max_augmented_versions
for version_i in augmented_versions:
tile_version = tile
tile_annotations_version = tile_annotations
is_mirrored = ((version_i % 4) == 1)
n90rotation = (version_i % 4)
if is_mirrored:
tile_annotations_version = self.flip_annotations(tile_annotations_version, tile_version)
tile_version = cv2.flip(tile_version, 0)
for rotation_i in range(n90rotation):
tile_annotations_version = self.rotate90clockwise_annotations(tile_annotations_version, tile_version)
tile_version = cv2.rotate(tile_version, cv2.ROTATE_90_CLOCKWISE)
tile_version = self.random_augmentation(tile_version)
tile_name = "{}-{}-{}-{}.jpg".format((zone_i + 1), x_tile, y_tile, version_i)
nannotated_tiles += 1
for (xmin, ymin), (xmax, ymax) in tile_annotations_version:
all_annotations = pandas_append(all_annotations, {'image_path': tile_name, 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, 'label': 'Tree'}, ignore_index=True)
if len(tile_annotations_version) == 0:
if self.tiles_without_annotations_supported:
all_annotations = pandas_append(all_annotations, {'image_path': tile_name, 'xmin': '0', 'ymin': '0', 'xmax': '0', 'ymax': '0', 'label': 'Tree'}, ignore_index=True)
nempty_tiles += 1
cv2.imwrite(self.dir_train_subtiles + tile_name, tile_version)
if self.debug_tiles:
tile_with_trees = self.debug_draw_trees(tile_version, tile_annotations_version)
cv2.imwrite(self.dir_train_subtiles_debug + tile_name, tile_with_trees)
if out_of_orthomosaic_train_tile == nx_tiles * ny_tiles:
raise RuntimeError("It seems that zone #{} has no orthomosaic data, please check zones, orthomosaic and its Outer Boundary.".format(zone_i + 1))
else:
if out_of_orthomosaic_train_tile > 0:
print("{}/{} of tiles in zone #{} has no orthomosaic data".format(out_of_orthomosaic_train_tile, nx_tiles * ny_tiles, zone_i + 1))
print("{} tiles ({} empty{}) for training prepared with {} annotations"
.format(nannotated_tiles, nempty_tiles, " - they are not supported" if (nempty_tiles > 0 and not self.tiles_without_annotations_supported) else "", len(all_annotations)))
print("Training with {} epochs and x{} augmentations (augment colors: {})...".format(self.max_epochs, self.data_augmentation_multiplier, self.augment_colors))
self.freeze_layers()
annotations_file = self.dir_train_subtiles + "annotations.csv"
all_annotations.to_csv(annotations_file, header=True, index=False)
class MyCallback(Callback):
def __init__(self, thiz_dlg):
self.nepochs_done = 0
self.nepochs = thiz_dlg.max_epochs
self.pbar = thiz_dlg.trainPBar
self.thiz_dlg = thiz_dlg
def on_epoch_end(self, trainer, pl_module):
self.nepochs_done += 1
                self.pbar.setValue(int(self.nepochs_done * 100 / self.nepochs))  # QProgressBar expects an int
Metashape.app.update()
app.processEvents()
self.thiz_dlg.check_stopped()
if torch.cuda.device_count() > 0:
print("Using GPU...")
trainer_gpus = 1
trainer_auto_select_gpus = True
else:
print("Using CPU (will be very slow)...")
trainer_gpus = 0
trainer_auto_select_gpus = False
torch.set_num_threads(multiprocessing.cpu_count())
trainer = Trainer(max_epochs=self.max_epochs, gpus=trainer_gpus, auto_select_gpus=trainer_auto_select_gpus, callbacks=[MyCallback(self)], checkpoint_callback=False, logger=False)
train_ds = self.m.load_dataset(annotations_file, root_dir=os.path.dirname(annotations_file))
trainer.fit(self.m, train_ds)
self.results_time_training = time.time() - training_start
if len(self.save_model_path) > 0:
torch.save(self.m.model, self.save_model_path)
print("Model trained on {} annotations with {} m/pix resolution saved to '{}'".format(self.train_nannotations_in_zones, self.orthomosaic_resolution, self.save_model_path))
def create_empty_tile(self):
import numpy as np
empty_tile = np.zeros((self.patch_size, self.patch_size, 3), np.uint8)
empty_tile[:, :, :] = 255
return empty_tile
def freeze_all_layers(self):
for p in self.m.model.backbone.parameters():
p.requires_grad = False
def freeze_low_layers(self, freezeConv1=False, freezeUpToLevel=2):
body = self.m.model.backbone.body
layers = [body.layer1, body.layer2, body.layer3, body.layer4]
assert (0 <= freezeUpToLevel <= len(layers))
if freezeConv1:
for p in self.m.model.backbone.body.conv1.parameters():
p.requires_grad = False
for layer in layers[:freezeUpToLevel]:
for p in layer.parameters():
p.requires_grad = False
def freeze_layers(self):
self.freeze_low_layers(freezeConv1=False, freezeUpToLevel=2)
def flip_annotations(self, trees, img):
# x, y -> x, h-y
import numpy as np
h, w, cn = img.shape
flipped_trees = []
for bbox_from, bbox_to in trees:
            assert np.all(bbox_from >= np.int32([0, 0]))
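            # --- the remainder of this method is missing from the source; the lines
            # below are a sketch of the vertical flip implied by the "x, y -> x, h-y"
            # comment above: mirror each bounding box about the horizontal axis ---
            flipped_from = np.int32([bbox_from[0], h - bbox_to[1]])
            flipped_to = np.int32([bbox_to[0], h - bbox_from[1]])
            flipped_trees.append((flipped_from, flipped_to))
        return flipped_trees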
# -*- coding: utf-8 -*-
""" Class to generate signals with continous-time Markov chains
@author: <EMAIL>
"""
# %%
import numpy as np
import random
import scipy.linalg
import itertools
def _solve_least_squares(a, b):
rcond = None
solution = np.linalg.lstsq(a, b, rcond=rcond)[0]
return solution
class ChoiceGenerator:
""" Class to generate random elements with weighted selection """
def __init__(self, number_of_states, cum_weights, block_size=5000):
""" Class to generate random elements with weighted selection
This is a replacement for random.choices that is efficient when a large number of choices has to be generated.
Args:
number_of_states (int): number of choices that has to be generated
            cum_weights (array[float]): cumulative probabilities of the choices
block_size (int): size of blocks of choices to generate
"""
if not number_of_states == len(cum_weights):
            raise Exception('specification of cumulative weights does not match number of states')
self.number_of_states = number_of_states
self._idx = 0
self._block_size = block_size
self.cum_weights = cum_weights
self._generate_block()
def _generate_block(self):
values = np.random.rand(self._block_size, )
counts, _ = np.histogram(values, [0] + list(self.cum_weights))
self._block = np.hstack(tuple([choice_idx * np.ones(c, dtype=int) for choice_idx, c in enumerate(counts)]))
np.random.shuffle(self._block)
def generate_choice(self):
""" Generate a choice
Returns:
int: integer in the range 0 to the number of states
"""
self._idx = self._idx + 1
if self._idx == self._block_size:
self._idx = 0
self._generate_block()
return self._block[self._idx]
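
# Hypothetical usage sketch (ours, not part of the original module): draw many
# weighted choices and compare empirical frequencies to the target distribution.
def _demo_choice_generator():
    gen = ChoiceGenerator(3, cum_weights=[0.2, 0.5, 1.0], block_size=1000)
    draws = [gen.generate_choice() for _ in range(10000)]
    freqs = np.bincount(draws, minlength=3) / len(draws)
    return freqs  # roughly [0.2, 0.3, 0.5] -- the weights are cumulative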
class ContinuousTimeMarkovModel:
def __init__(self, states, holding_parameters, jump_chain):
""" Class that represents a continous-time Markov chain
Args:
states (str[]): list with names for the states
holding_parameters(float[]): List with the holding parameters. The holding parameters determine the average
time before the system will make a jump to a new state
jump_chain (array): The jump chain or transition matrix. This matrix gives the probability for the system
to jump from a state to one of the other states. The sum of the probabilities in each column must
equal one.
For an introduction to Markov chains see https://www.probabilitycourse.com/chapter11/11_3_1_introduction.php
Also see: https://vknight.org/unpeudemath/code/2015/08/01/simulating_continuous_markov_chains.html
"""
self.states = states
self.update_model(holding_parameters, jump_chain)
def update_model(self, holding_parameters, jump_chain):
""" Update the model of the markov chain
Args:
holding_parameters: List with the holding parameters
jump_chain: The jump chain or transition matrix
For a detailed description of the parameters see the class documentation.
"""
self.holding_parameters = np.array(holding_parameters).flatten().reshape((-1, 1))
self.jump_chain = jump_chain
self.generator_matrix = self._create_generator_matrix(self.holding_parameters, self.jump_chain)
self._validity_check()
def _validity_check(self):
if len(self.states) != len(self.jump_chain):
raise AssertionError('States do not equal jump chain!')
if not np.allclose(np.sum(self.jump_chain, axis=0), 1):
raise AssertionError('Jump chain matrix should represent probabilities!')
        if np.any(self.holding_parameters <= 0):
            raise AssertionError('Not all holding parameters are bigger than zero!')
@staticmethod
def _create_generator_matrix(holding_parameters, jump_chain):
generator_matrix = np.array(jump_chain, copy=True)
for ii in range(generator_matrix.shape[0]):
generator_matrix[:, ii] = holding_parameters[ii] * jump_chain[:, ii]
for ii in range(generator_matrix.shape[0]):
generator_matrix[ii, ii] = -holding_parameters[ii]
return generator_matrix
def number_of_states(self):
""" Return the number of states in the model """
return len(self.states)
def transition_matrix(self, delta_time):
""" Return the transition matrix for a specified amount of time """
transition_matrix = scipy.linalg.expm(delta_time * self.generator_matrix)
return transition_matrix
def __repr__(self):
return "%s(id=0x%x, states=%s, generator=%s)" % (self.__class__.__name__,
id(self), self.states, self.generator_matrix)
def stationary_distribution_direct(self):
""" Return the stationary distribution of the model
The calculation method is taken from:
https://www.probabilitycourse.com/chapter11/11_3_2_stationary_and_limiting_distributions.php, Theorem 11.3
"""
pi_tilde = self.stationary_distribution_discrete(self.jump_chain)
norm = np.sum((pi_tilde / self.holding_parameters))
stationary_distribution = (pi_tilde / self.holding_parameters) / norm
return stationary_distribution
def stationary_distribution(self):
""" Return the stationary distribution of the model
The calculation method is taken from:
https://vknight.org/unpeudemath/code/2015/08/01/simulating_continuous_markov_chains.html
"""
Q = self.generator_matrix
n = Q.shape[0]
A = np.vstack((Q, np.ones((1, n))))
B = np.zeros((n + 1, 1))
B[-1] = 1
stationary_distribution = _solve_least_squares(A, B)
return stationary_distribution
@staticmethod
def stationary_distribution_discrete(jump_chain):
""" Return the stationary distrubution for a Markov chain """
n = jump_chain.shape[0]
        A = np.vstack((jump_chain - np.eye(n), np.ones((1, n))))
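        # --- the remainder of this method is missing from the source; the lines
        # below mirror stationary_distribution above: solve the balance equations
        # together with the normalization constraint in the least-squares sense ---
        B = np.zeros((n + 1, 1))
        B[-1] = 1
        stationary_distribution = _solve_least_squares(A, B)
        return stationary_distribution


# Hypothetical usage sketch (ours, not part of the original module): a two-state
# chain whose stationary distribution is proportional to 1 / holding_parameters.
def _demo_two_state_chain():
    model = ContinuousTimeMarkovModel(['up', 'down'],
                                      holding_parameters=np.array([1.0, 2.0]),
                                      jump_chain=np.array([[0., 1.], [1., 0.]]))
    return model.stationary_distribution().flatten()  # approximately [2/3, 1/3]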
import argparse
import sys
import numpy as np
def get_args():
"""Define the task arguments with the default values.
Returns:
experiment parameters
"""
args_parser = argparse.ArgumentParser()
# Data files arguments
args_parser.add_argument(
'--files',
help='Filenames.',
nargs='+',
type=str,
default=['expval.data1','expval.data2']
)
return args_parser.parse_args()
def sort_expval_files(l):
d=[]
for f in l:
ind=f.find('expval.data')
if(ind==-1):
            sys.exit("Filenames do not include the string 'expval.data'.")
d.append(int(f[ind+11:]))
d=np.array(d)
perm=np.argsort(d)
newl=[]
for i in range(d.shape[0]):
newl.append(l[perm[i]])
return newl
def main():
def _read_expval(filename):
gvec=[]
pmd=[]
pmd_sqr=[]
with open(filename) as f:
read_pmd2=False
lines=f.readlines()
for i in range(len(lines)):
if lines[i][:39]=='START POSITRON MOMENTUM DENSITY SQUARED':
read_pmd2=True
if lines[i][:19]=='Number of G-vectors':
Ng=int(lines[i+1])
if lines[i][:19] == 'G-vector components':
for j in range(1,Ng+1):
components=lines[i+j].split()
vec=[]
vec.append(float(components[0]))
vec.append(float(components[1]))
vec.append(float(components[2]))
                        gvec.append(np.array(vec))
####################################################################################################
### ###
### Functions for probabilistic assignment ###
### Author: <NAME> (EPFL) ###
### Last modified: 03.09.2021 ###
### ###
####################################################################################################
# Import libraries
import numpy as np
import time
import re
def increase_threshold(thresh, thresh_inc):
"""
Increase the threshold based on a string
Inputs: - thresh Initial threshold
- thresh_inc How the threshold should be increased.
"xN": multiply by N
"+N": increase by N
Output: - thresh Increased threshold
"""
if thresh_inc.startswith("x"):
return thresh * float(thresh_inc.split("x")[1])
elif thresh_inc.startswith("+"):
return thresh + float(thresh_inc.split("+")[1])
raise ValueError("Unkown threshold update: {}".format(thresh_inc))
def get_possible_assignments(scores, labels, exp, thresh=100., thresh_increase="x2"):
"""
Get all possible assignments for each probability distribution, given that if an individual probability of
assigning the distribution to a shift is more than "thresh" times higher than the probability of assigning it
to another shift, the latter possibility is discarded.
Inputs: - scores Array of individual scores
- labels List of labels of the distributions
- exp List of experimental shifts
- thresh Relative probability threshold to discard a score
- thresh_increase How the threshold should be increased.
"xN": multiply by N
"+N": increase by N
Output: - possible_assignments Lists of possible assignments for each distribution
- thresh Updated relative probability threshold to discard a score
"""
# If we do not set a threshold, consider all assignments as possible
# NOT RECOMMENDED, scaling is factorial!
if thresh < 0.:
print("WARNING: Not setting a threshold for considering plausible assignments is not recommended, scaling is factorial!")
possible_assignments = [list(range(len(exp))) for _ in scores]
return possible_assignments, thresh
consistent = False
while not consistent:
cleaned_scores = np.copy(scores)
for i in range(len(cleaned_scores)):
m = np.max(cleaned_scores[i])
# Discard assignments that have an individual score lower than 1/thresh times the maximum score
cleaned_scores[i, cleaned_scores[i] < m / thresh] = 0.
# Get the possible assignments
possible_assignments = []
for i, s in enumerate(cleaned_scores):
possible_assignments.append(list(np.where(s > 0.)[0]))
# Clean up assignments, i.e. if only one distribution can be assigned to a given shift, then it must be assigned to that shift
change = True
while change:
change = False
# Loop over all distributions with only one equivalent
for i, a in enumerate(possible_assignments):
if len(a) > 1 and "/" not in labels[i]:
# Loop over all possible assignments
for ai in a:
found = False
for j, a2 in enumerate(possible_assignments):
if i != j and ai in a2:
found = True
break
# If the specific assignment is not found anywhere else, the shift can only be assigned to distribution i
if not found:
cleaned_scores[i,[aj for aj in a if aj != ai]] = 0.
possible_assignments[i] = [ai]
change = True
break
if change:
break
consistent = True
# Check that each distribution can be assigned to at least one experimental shift
for s in cleaned_scores:
if np.sum(s) == 0.:
consistent = False
thresh = increase_threshold(thresh, thresh_increase)
break
# Check that each experimental shift can be assigned to at least one distribution
for s in cleaned_scores.T:
if np.sum(s) == 0.:
consistent = False
thresh = increase_threshold(thresh, thresh_increase)
break
print(" Scores cleaned up, threshold set to {}\n".format(thresh))
# When assigning 2D distributions, if two central or neighbouring atoms in different graphs are the same,
# then we merge all possible assignments
if "-" in labels[0]:
complete_asns = []
for i, (l, a) in enumerate(zip(labels, possible_assignments)):
complete_asns.append(a)
if "/" in l:
ls = [tmp.split("-") for tmp in l.split("/")]
for i1, l1 in enumerate(ls):
for i2, l2 in enumerate(ls):
if i2 > i1:
for k, (l1k, l2k) in enumerate(zip(l1, l2)):
if l1k == l2k:
for ai in complete_asns[i]:
match = exp[ai].split("\\")[k]
for j, e in enumerate(exp):
if match == e.split("\\")[k] and e not in [exp[aj] for aj in complete_asns[i]]:
complete_asns[i].append(j)
possible_assignments = []
for a in complete_asns:
possible_assignments.append(list(np.sort(a)))
return possible_assignments, thresh
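
# Hypothetical usage sketch (ours, not part of the original module): two
# distributions against two experimental shifts. The tiny off-diagonal score of
# the first distribution falls below the relative threshold, and the clean-up
# pass then pins the second distribution to the remaining shift.
def _demo_get_possible_assignments():
    scores = np.array([[0.90, 0.001],
                       [0.20, 0.80]])
    asns, thresh = get_possible_assignments(scores, ["C1", "C2"], ["10.0", "20.0"], thresh=100.)
    return asns, thresh  # ([[0], [1]], 100.0)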
def get_assignment_pool(possible_asn, assigned):
"""
Get an assignment pool, i.e. a set of distributions and shifts to assign independently from the rest
Inputs: - possible_asn List of possible assignments for each distribution
- assigned List of already assigned distributions
Outputs: - dist_pool Pool of distributions
- shift_pool Shifts in the pool of distributions
"""
# Initialize arrays of possible shifts and distributions
dist_pool = []
shift_pool = []
# Loop over all distribution
for i, a in enumerate(possible_asn):
if i not in assigned:
# Get the possible shifts for the first not already assigned distribution
shift_pool.extend(a)
dist_pool.append(i)
break
change = True
while change:
change = False
# Loop over all distributions
for i, a in enumerate(possible_asn):
if i not in dist_pool and i not in assigned:
for ai in a:
# If any of the possible shifts for this distribution is within the pool, add this distribution
# to the possible assignment pool
if ai in shift_pool:
dist_pool.append(i)
shift_pool.extend([aj for aj in a if aj not in shift_pool])
change = True
break
return dist_pool, shift_pool
def generate_global_asns(possible_asns, scores, n_dist, n_exp, ls, es, equiv,
global_asns=[], already_linked=[], rank=0, max_asn=None,
r_max_asn=0, max_excess=None, disp_rank=-1, t_start=None):
"""
Recursively generate global assignments given possible individual assignments.
Inputs: - possible_asns Possible assignments for each nucleus/distribution
- scores Matrix of scores for individual assignments
- n_dist Number of distributions
- n_exp Number of experiments
- ls Labels of the nuclei/distributions
- es Experimental shifts
- equiv Equivalent nuclei/distributions
- global_asns Already found global assignments
- already_linked Already assigned nuclei/distributions
- rank Assignment rank
- max_asn Maximum number of assignments to consider
- r_max_asn Rank from which to start reducing the number of assignments
- max_excess Maximum excess for assignment (defined as the maximum
number of nuclei/distributions assigned
to a single experimental shift minus one)
- disp_rank Rank up to which progress should be displayed
- t_start Starting time
Output: - global_asns List of global assignments generated
"""
# If all distributions are already assigned, append the assignment found
if rank == n_dist:
# Sort the equivalent parts
sorted_asn = already_linked.copy()
already_eq = []
for eq in equiv:
if len(eq) > 1 and eq[0] not in already_eq:
tmp = sorted([already_linked[i] for i in eq])
for i, e in enumerate(eq):
sorted_asn[e] = tmp[i]
already_eq.extend(eq)
# Return the list of already generated global assignments
global_asns.append(sorted_asn)
return global_asns
# Get the number of assignments to consider for this distribution
rank_len = len(possible_asns[rank])
if max_asn is not None and rank >= r_max_asn:
rank_len = min(max_asn, rank_len)
# Get the intermediate scores
these_scores = []
for a in possible_asns[rank]:
# Generate the assignment
this_asn = already_linked.copy()
this_asn.append(a)
# Check if the assignment is valid so far (get the excess and maximum individual excess)
excess = 0
ind_excess = 0
for j in np.unique(this_asn):
excess += max(this_asn.count(j) - 1, 0)
ind_excess = max(ind_excess, this_asn.count(j) - 1)
# Check that same nuclei are assigned to the same shift (important for 2D simulated experiments)
for i, a1 in enumerate(this_asn):
for j, a2 in enumerate(this_asn):
if i < j:
for k in range(len(ls[i])):
# If the same nucleus is assigned to two different shifts, discard the assignment
if ls[i,k] == ls[j,k] and es[a1,k] != es[a2,k]:
excess = n_dist - n_exp + 1
break
if excess > n_dist - n_exp:
break
if excess > n_dist - n_exp:
break
# If the assignment is valid so far, try to assign the next distribution
if excess > n_dist - n_exp or (max_excess is not None and ind_excess > max_excess):
score = 0.
else:
# Get the corresponding score
score = 1.
for i, a in enumerate(this_asn):
score *= scores[i, a]
these_scores.append(score)
# Sort the intermediate scores by decreasing value
score_inds = np.argsort(these_scores)[::-1]
# Remove instances where the score is zero
    score_inds = score_inds[:np.count_nonzero(these_scores)]
#! /usr/bin/python3
import numpy as np
import random as rnd
import functions
import pandas as pd
class NAgent:
nnetfileName = "./data/garry_007.nn"
user_id = None
case_id = None
url = None
nnet = None
rmsprop_cache = None
grad_buffer = None
prev_act = None
prev_score = None
prev_hash = None
prev_weights_dict = None
decay_rate = 0.95 # decay factor for RMSProp leaky sum of grad^2
alpha = 0.01 # learning rate
gamma = 0.51 # discount coef
delta = 0.0001 # LR descent coef
batch_size = 10
    help_degree = 0.2  # fraction of choices where heuristic weight corrections are applied
    dropout = 0.5  # fraction of neurons in hidden layers to drop out
xs, hs, h2s, errs, zs, rs = [], [], [], [], [], []
a_xs, a_hs, a_h2s, a_zerrs = None, None, None, None
episode = pd.DataFrame(columns= ['hash_s', 'act', 'reward', 'hash_news'])
    gamesQ = 0  # game counter - needed to implement batching during training
all_acts_dict = {
"none": ["noAct", "noAct"], "take": ["noAct", "Take"],
"go_forward": ["noAct", "Go"], "go_right": ["onRight", "Go"],
"go_back": ["upSideDn", "Go"], "go_left": ["onLeft", "Go"],
"shoot_forward": ["noAct", "Shoot"], "shoot_right": ["onRight", "Shoot"],
"shoot_back": ["upSideDn", "Shoot"], "shoot_left": ["onLeft", "Shoot"]
}
all_acts_nums_dict = {
"none": 0, "take": 1, "go_forward": 2, "go_right": 3, "go_back": 4,
"go_left": 5, "shoot_forward": 6, "shoot_right": 7, "shoot_back": 8,
"shoot_left": 9
}
all_acts_list = [ "none", "take", "go_forward", "go_right", "go_back", "go_left",
"shoot_forward", "shoot_right", "shoot_back", "shoot_left" ]
colnames = ["hash", "take", "go_forward", "go_right", "go_back", "go_left", "shoot_forward", "shoot_right",
"shoot_back", "shoot_left"]
fin_codes = ['--- agent is dead ---', '---- time is over ---', '!!! agent is WINNER !!!']
# PUBLIC METHODS ==============================================================
def playGame(self, map_num, alpha, gamma, batch_size=10, tid=0, hashid=0):
self.alpha = alpha
self.gamma = gamma
self.batch_size = batch_size
self.help_degree = 0.5
self.dropout = 0.5
        request_code = None  # move completion code
        curr_score = None  # accumulated score
        # query the state of the starting cave by making an empty move
acts = self.all_acts_dict["none"]
request = functions.connectToServer(self.user_id, self.case_id, map_num, acts, tid, hashid)
        if request != None:  # connection to the server established, we can play
            # parse the server response
request_error = request["error"]
percept = request["text"]
curr_score = percept['iagent']["score"]
            # initialize the variables that track the previous state and move
curr_hash = self.__getHash__(percept)
self.prev_act = "none"
self.prev_score = curr_score
self.prev_hash = curr_hash
            # create a move table to record the game (episode)
rec = {'hash_s': [curr_hash], 'act': ["none"], 'reward': [0], 'hash_news': [curr_hash]}
self.episode = pd.DataFrame(columns= rec.keys())
            # start the game
            while request_error == None:  # while there is no error (the game has not ended)
if request != None:
                    ''' choose a move for the current state; if the state is new, add it to the
                    policy (utilities) database (+4 records); update the count of new database fields '''
curr_act = self.__chooseAct__(curr_hash)
acts = self.all_acts_dict[curr_act]
                    # remember the score accumulated before the chosen move, the hash of the current state s, and the chosen action
self.prev_score = curr_score
self.prev_hash = curr_hash
self.prev_act = curr_act
                    # request a response from the server: send the chosen move and receive the new state s'
request = functions.connectToServer(self.user_id, self.case_id, map_num, acts, tid, hashid)
if request != None:
                        # parse the server response
request_error = request["error"]
percept = request["text"]
curr_score = percept["iagent"]["score"]
request_code = int(percept["code"])
curr_hash = self.__getHash__(percept)
                        # ----- append info about the new move to the move table and reward list -----
reward = curr_score - self.prev_score
self.rs.append(reward)
                        # ---------- this table is only needed for monitoring ----------------
rec = {'hash_s': [self.prev_hash], 'act': [curr_act], 'reward': [reward], 'hash_news': [curr_hash]}
step1 = pd.DataFrame(data= rec)
self.episode = pd.concat([self.episode, step1])
            # update the utility of the last move and train the neural network
            if request_code in [0, 1, 2]:  # the game is over, train the agent
                self.gamesQ += 1  # game counter
self.episode.index = list(range(self.episode.shape[0]))
self.__update_nnet__()
                print('------ Completion code = ', self.fin_codes[request_code], ' --------')
else:
print("WARNING! Server was not responded.")
return request_code, curr_score, self.gamesQ
# PRIVATE METHODS =============================================================
def __init__(self, sess_parms, nnet_parms):
self.user_id = sess_parms[0]
self.case_id = sess_parms[1]
self.tid = sess_parms[2]
self.hashid = sess_parms[3]
self.__createNnet__(nnet_parms)
    # create or load the neural network
def __createNnet__(self, nnet_parms):
self.nnet = functions.openNnet(nnet_parms)
    # save the neural network
def __saveNnet__(self):
functions.saveNnet(self.nnet, self.nnetfileName)
def __getHash__(self, percept):
"""
:param percept: полученное текущее восприятие ситуации
:return: хэш, символьная строка, кодирующая ситуацию
"""
is_monster_alive = str(int(percept["worldinfo"]["ismonsteralive"]))
newcaveopenedQ = len(percept["iagent"]["knowCaves"])
# !!!!!!!!!!! ----- ONLY FOR 4x4 ------- !!!!!!!!!!!!
unknowncavesQ = 16 - newcaveopenedQ
if unknowncavesQ > 2:
unknowncave_count = '3'
else:
unknowncave_count = str(unknowncavesQ)
arrow_count = str(int(percept["iagent"]["arrowcount"]))
legs_count = str(int(percept["iagent"]["legscount"]))
curr_cave = percept["currentcave"]
curr_cave_state = str(int(curr_cave["isGold"])) + str(int(curr_cave["isWind"])) \
+ str(int(curr_cave["isBones"])) + str(int(curr_cave["isHole"]))
front_cave_state = self.__getNearCaveState__(percept["perception"]["front_cave"])
back_cave_state = self.__getNearCaveState__(percept["perception"]["behind_cave"])
left_cave_state = self.__getNearCaveState__(percept["perception"]["left_cave"])
right_cave_state = self.__getNearCaveState__(percept["perception"]["right_cave"])
front_left_cave_state = self.__getNearCaveState__(percept["perception"]["front_left_cave"])
front_right_cave_state = self.__getNearCaveState__(percept["perception"]["front_right_cave"])
behind_left_cave_state = self.__getNearCaveState__(percept["perception"]["behind_left_cave"])
behind_right_cave_state = self.__getNearCaveState__(percept["perception"]["behind_right_cave"])
res = is_monster_alive + arrow_count + legs_count + unknowncave_count + curr_cave_state
res = res + front_left_cave_state + front_cave_state + front_right_cave_state + right_cave_state
res = res + behind_right_cave_state + back_cave_state + behind_left_cave_state + left_cave_state
return res
    # artificially adjust the weights using the cave state
    # weights - np.array of weights: weights = np.array(curr_weights_row[1:])
    # hash - hash-code string
def __correctWeights__(self, weights, hash, min_w=0, max_w=1):
actshift = {"go":1, "shoot":5}
#dirshift = {"forward":0, "back":1, "left":2, "right":3}
dirshift = {"forward":0, "right":1, "back":2, "left":3}
#caveshifts = {"forward":7, "back":11, "left":15, "right":19}
caveshifts = {"forward":12, "right":20, "back":28, "left":36}
        if hash[4] == '1':  # the gold must be taken
weights = np.ones(len(weights)) * min_w
weights[0] = max_w
else:
            weights[0] = min_w  # there is no gold - no need to take
        if (hash[6] == '0'):  # better not to shoot if there is no monster nearby
for ii in range(4):
weights[actshift["shoot"] + ii] = min_w+0.01
        if (hash[0] == '0') or (hash[1] == '0'):  # no point trying to shoot - the monster is dead or there are no arrows
for ii in range(4):
weights[actshift["shoot"] + ii] = min_w
for cavedir in ["forward", "right", "back", "left"]:
if hash[caveshifts[cavedir]] == '2': # wall
weights[actshift["go"]+dirshift[cavedir]] = min_w
weights[actshift["shoot"] + dirshift[cavedir]] = min_w
            if ((hash[caveshifts[cavedir] + 3] == '1') and (hash[2] == '1')):  # do not step into a hole with only one leg!!!
weights[actshift["go"] + dirshift[cavedir]] = min_w
return weights
    # get the state of a cave
def __getNearCaveState__(self, cave):
cave_state = "2222"
if cave["isWall"] == 0:
cave_state = "0222"
if cave["isVisiable"] == 1: # Обновлено!!! - считается, что пещера ВИДИМА!!
cave_state = "1" + str(int(cave["isWind"])) + str(int(cave["isBones"])) + str(int(cave["isHole"]))
return cave_state
    # get a random action with probability proportional to its weight
def __getActionByWeight__(self, curr_weights_dict):
acts = np.array(list(curr_weights_dict.keys()))
weights = np.array(list(curr_weights_dict.values()), dtype=float)
        # exclude obviously losing moves from the lottery and lower the chance of picking merely bad moves
        limit_weight = 0  # threshold for obviously losing moves
        max_weight = np.max(weights)
        #if (max_weight <= limit_weight): limit_weight = max_weight - 10  # safeguard against a no-win situation
        if (max_weight <= limit_weight): limit_weight = weights[weights.argsort()[-2]]  # safeguard against a no-win situation
acts = acts[weights >= limit_weight]
weights = weights[weights >= limit_weight]
min_weight = np.min(weights)
weights = weights - min_weight + 0.001
weights_array = weights / np.sum(weights)
curr_act = rnd.choices(population=list(acts), weights=weights_array)[0]
acts = self.all_acts_dict[curr_act]
return curr_act, acts
    # convert the character hash into a numeric input vector for the neural network
    def __hash2vec__(self, curr_hash):
        return [int(c) for c in curr_hash]
    # build a one-hot vector for the chosen action (100000000 - for 'take')
    def __act_to_yvec__(self, curr_act):
        act_yvec = np.zeros(9)
        act_num = self.all_acts_nums_dict[curr_act]
        if act_num > 0:  # "none" maps to the all-zero vector
            act_yvec[act_num - 1] = 1
        return act_yvec
def __discount_rewards__(self, r, gamma):
""" take 1D float array of rewards and compute discounted reward """
discounted_r = np.zeros_like(r) * 1.0
running_add = 0.0
for t in reversed(range(0, r.size)):
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
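    # Worked example (ours): with gamma = 0.5, the reward sequence [0, 0, 1]
    # becomes [0.25, 0.5, 1.0] -- each step inherits half of the future return:
    #     self.__discount_rewards__(np.array([0., 0., 1.]), 0.5)
    #     -> array([0.25, 0.5 , 1.  ])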
    # choose a move for the current state curr_hash;
    # if the state is new, add it to the policy (utilities) database (+4 records)
def __chooseAct__(self, curr_hash):
#colnames = ["hash", "take", "go_forward", "go_right", "go_back", "go_left", "shoot_forward", "shoot_right", "shoot_back", "shoot_left"]
x = self.__hash2vec__(curr_hash)
        # compute the neural network output
ynet, h = functions.policy_forward(model= self.nnet, x=x)
if rnd.random() < self.help_degree:
            # correct the weights for the obvious cases
weights = list(self.__correctWeights__(np.array(ynet).copy(), curr_hash))
else:
weights = list(ynet)
weights.insert(0, curr_hash)
curr_weights_row = tuple(weights)
curr_weights_dict = dict(zip(self.colnames, curr_weights_row))
del curr_weights_dict["hash"]
self.prev_weights_dict = curr_weights_dict
curr_act, acts = self.__getActionByWeight__(curr_weights_dict)
        # ------------------ remember the network state for future training ------------
# record various intermediates (needed later for backprop)
        self.xs.append(x)  # game state - input for nnet
self.hs.append(h) # hidden state
#self.zs.append(z) # observation
yvec = self.__act_to_yvec__(curr_act)
        self.errs.append(yvec - ynet)  # error of the action that was taken
return curr_act
    # -----------------------------------------------------------------------------------------
    # -------------- train the neural network on the results of the finished game ------------
def __update_nnet__(self):
"""
обновление полезности всех сделанных ходов после завершения одной игры
:param self.episode: содержит результаты всех ходов в иде кортежа (hash_s, act, reward, hash_news)
:param alpha: с параметром обучения
:param gamma: и с параметром дисконта
:return: обновление базы данных или Q-таблицы
"""
model = self.nnet
self.rmsprop_cache = { k : np.zeros_like(v) for k,v in model.items() }
self.grad_buffer = { k : np.zeros_like(v) for k,v in model.items() }
# stack together all inputs, hidden states, action errors, and rewards for this episode
a_xs = np.vstack(self.xs)
a_hs = np.vstack(self.hs)
#a_zs = np.vstack(self.zs)
a_errs = np.vstack(self.errs)
a_rs = | np.vstack(self.rs) | numpy.vstack |
""" Mesh module.
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import array
from itertools import islice
import mimpy.mesh.mesh_cython as mesh_cython
import mimpy as mimpy
from six.moves import map
from six.moves import range
from six.moves import zip
def tb(s):
""" Encodes strings for compatibility
with Python 3.
"""
return s.encode()
class variable_array():
""" The class is an efficient reprenstation of variable
lenght two dimensional arrays. It can represent
basic data types such as ints and floats and allows variable
lengths on entries. That is:
a[0] = [1, 2, 3, 4,]
a[1] = [1, 2, 3, 4, 5, 6, 7]
Internally, the data is stored is a 1-d array, with a
separate array indicating the offset and data lenth:
data = [1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7]
offset = [[0, 4], [5, 7]]
The structure allows the user to modify the entries
as well as extend the data as needed.
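
    Example (illustrative sketch)::

        a = variable_array(dtype=np.dtype('i'))
        i0 = a.add_entry([1, 2, 3, 4])
        i1 = a.add_entry([1, 2, 3, 4, 5, 6, 7])
        a[i0]        # -> array([1, 2, 3, 4])
        a[i1] = [9]  # entries may shrink (or grow) in place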
"""
    def __init__(self, dtype=np.dtype('d'), size=(2, 2), dim=1):
self.pointer_capacity = size[0]
self.data_capacity = size[1]
self.dim = dim
self.dtype = dtype
self.pointers = np.empty(shape=(self.pointer_capacity, 2),
dtype=np.dtype("i"))
if self.dim == 1:
self.data = np.empty(shape = (self.data_capacity),
dtype=self.dtype)
else:
self.data = np.empty(shape = (self.data_capacity, self.dim),
dtype=self.dtype)
self.number_of_entries = 0
self.next_data_pos = 0
def __del__(self):
del self.pointers
del self.data
def set_pointer_capacity(self, capacity):
""" Sets the maximum number of entries in
the data structure.
:param int capacity: Number of expected entries.
:return: None
"""
self.pointer_capacity = capacity
        self.pointers.resize((self.pointer_capacity, 2), refcheck=False)
def set_data_capacity(self, capacity):
""" Sets the maximum number of entries in
the data structure.
:param int capacity: Total data entries.
:return: None
"""
self.data_capacity = capacity
if self.dim == 1:
self.data.resize((self.data_capacity), refcheck=False)
else:
self.data.resize((self.data_capacity, self.dim), refcheck=False)
def new_size(self, size, minimum = 1000):
""" Calculates the new size of the array
given the old in case there is a need
for extending the array.
:param int size: Old data size.
:param int minimum: Sets minimum new data size.
:return: New data structure size.
:rtype: int
"""
        return max(size + size//2 + 2, minimum)
def add_entry(self, data):
""" Adds new data to end of the list.
:param dtype data: Generic data to be added. Usually\
either scalar type (float, int ...) or ndarray type.
:return: Index of the new entry.
:rtype: int
"""
if self.number_of_entries < len(self.pointers):
self.pointers[self.number_of_entries, 0] = self.next_data_pos
self.pointers[self.number_of_entries, 1] = len(data)
else:
new_array_size = self.new_size(len(self.pointers), len(data))
self.pointers.resize((new_array_size, 2),
refcheck=False)
self.pointers[self.number_of_entries, 0] = self.next_data_pos
self.pointers[self.number_of_entries, 1] = len(data)
if len(self.data) > self.next_data_pos+len(data):
self.data[self.next_data_pos:self.next_data_pos+len(data)] = data
else:
if self.dim == 1:
self.data.resize(self.new_size(len(self.data)),
refcheck=False)
else:
self.data.resize((self.new_size(len(self.data)), self.dim),
refcheck=False)
self.data[self.next_data_pos:self.next_data_pos+len(data)] = data
self.next_data_pos = len(data)+self.next_data_pos
self.number_of_entries += 1
return self.number_of_entries-1
def get_entry(self, index):
""" Return entry.
"""
if index > self.number_of_entries:
raise IndexError("No entry with index " +str(index))
(pos, length) = self.pointers[index]
return self.data[pos:pos+length]
def __getitem__(self, index):
"""Overloaded get index function.
"""
return self.get_entry(index)
def __setitem__(self, index, value):
""" Overloaded setting function.
"""
self.set_entry(index, value)
def __len__(self):
""" Returns number of entries.
"""
return self.number_of_entries
def set_entry(self, index, data):
""" Changes existing entry to new data.
The new entry can be larger than old, but might cause
wasted memory.
"""
(pos, length) = self.pointers[index]
if length >= len(data):
self.data[pos:pos+len(data)] = data
self.pointers[index, 1] = len(data)
else:
if len(self.data) > self.next_data_pos+len(data):
self.data[self.next_data_pos:
self.next_data_pos+len(data)] = data
else:
if self.dim == 1:
self.data.resize(self.new_size(len(self.data)),
refcheck=False)
self.data[self.next_data_pos:
self.next_data_pos+len(data)] = data
else:
self.data.resize(self.new_size((len(self.data)), self.dim),
refcheck=False)
self.data[self.next_data_pos:
self.next_data_pos+len(data)] = data
self.pointers[index, 0] = self.next_data_pos
self.pointers[index, 1] = len(data)
self.next_data_pos += len(data)
class Mesh:
""" The **Mesh** class is a common representation of polygonal
meshes in Mimpy. In addition to the mesh data structure,
it provides commonly used mesh functions as such
calculating volumes and centroids as well as basic visualization.
The **Mesh** class serves as base implementation,
with the specific mesh types (such as hexahedra,
    tetrahedra and Voronoi) inheriting from it.
"""
def __init__(self):
# List of points used to construct mesh faces.
        # Each point coordinate is represented by
        # a Numpy array.
self.points = np.empty(shape=(0, 3), dtype=np.dtype('d'))
self.number_of_points = 0
# List of mesh faces, each face is represented by the
        # a list of points. In 2D, it's a list of pairs of points.
# In 3D, it's an ordered list of points that make up the
# polygon.
self.faces = variable_array(dtype=np.dtype('i'))
# Face normals.
self.face_normals = np.empty(shape=(0, 3), dtype=np.dtype('d'))
# Area of mesh face.
self.face_areas = np.empty(shape=(0), dtype=np.dtype('d'))
# The centroid of face.
self.face_real_centroids = np.empty(shape=(0, 3))
# Dict that maps faces to the cells
# they are in.
self.face_to_cell = np.empty(shape=(0, 2), dtype=np.dtype('i'))
# A point on the plane of the face that is used
# to build the MFD matrix R. This point does
# not have to be on the face itself.
self.face_shifted_centroids = np.empty(shape=(0, 3))
self.has_face_shifted_centroid = False
self.has_cell_shifted_centroid = False
self.has_alpha = False
self.boundary_markers = []
self.boundary_descriptions = []
# Hash from face marker => [[face index, face normal orientation]]
self.boundary_faces = {}
# List of cells. Each cell is made up of a list
# of faces.
self.cells = variable_array(dtype=np.dtype('i'))
# For each cell, a list of bools indicating
# whether the normal in self.face_normals
# is in or out of the cell.
self.cell_normal_orientation = variable_array(dtype=np.dtype('i'))
# Cell Volumes.
self.cell_volume = np.empty(shape=(0), dtype=np.dtype('d'))
# List of cell centroids.
self.cell_real_centroid = np.empty(shape=(0, 3), dtype=np.dtype('d'))
# Points used inside the cell used
# to build the MFD matrix R.
self.cell_shifted_centroid = np.empty(shape=(0, 3), dtype=np.dtype('d'))
self.cell_k = np.empty(shape=(0, 9))
# Tags cells depending on which domain
# they belong to (for fractures and
# multi-domain problems)
self.cell_domain = np.empty(shape=(0), dtype=int)
self.cell_domain_tags = set([0])
self.dim = 3
# dict: {face_index: (cell_index, face_orientation), ...}
# Allows Dirichlet boundaries to be set implicitly
# based on pressure of cells.
self.dirichlet_boundary_pointers = {}
self.periodic_boundaries = []
# Faces designated as no flow, meant for
# interior boundary conditions not to be
# set by user.
self.internal_no_flow = []
# dict: {face_index: (lagrange_index, orientation)}
# Allows dirichlet boundaries to point to
# lagrange multipliers for domain decomposition.
    # A lagrange multiplier is a face identical
# to the one pointing to it, but not associated
# with any cells.
self.face_to_lagrange_pointers = {}
# dict: lagrange_index: [(face_index_1, orientation), ...], ...}
# Points a lagrange multiplier to faces associated
# with it. Is treated like a forcing function.
self.lagrange_to_face_pointers = {}
# dict: {cell_index: [(face_index_1, orientation), ...], ...}
# Allows source terms to be set implicitly
# based on fluxes at other faces.
self.forcing_function_pointers = {}
# Lowest order term coef
# List: [alpha1, alpha2, ...]
self.cell_alpha = []
self.is_using_alpha_list = False
self.gravity_vector = None
self.gravity_acceleration = 9.8
def add_point(self, new_point):
""" Takes a Numpy array
        representing the cartesian point coordinates,
and appends the point to the end of the point list.
Returns the index of the new point.
:param ndarray new_point: New point to be added to mesh.
:return: Index of new point.
:rtype: int
"""
if self.number_of_points < len(self.points):
self.points[self.number_of_points] = new_point
self.number_of_points += 1
else:
            new_array_size = (len(self.points)+len(self.points)//2+1, 3)
self.points.resize(new_array_size, refcheck=False)
self.points[self.number_of_points] = new_point
self.number_of_points += 1
return self.number_of_points-1
def get_point(self, point_index):
""" Takes a point index and returns
        a Numpy array of point coordinates.
        :param int point_index:
        :return: The point coordinates.
:rtype: ndarray
"""
return self.points[point_index]
def get_number_of_points(self):
""" Returns the total number of points.
:return: Total number of points in mesh.
:rtype: int
"""
return self.number_of_points
def _memory_extension(self, size):
""" Function for finding size of memory
extension jumps.
"""
        return size + size//2 + 1
def add_face(self, list_of_points):
""" Takes a list of point indices, and
appends them to the list of faces
in the mesh. The point indices must
be oriented in a clockwise direction
relative to the face normal. In 2D, a
        face is represented by two points.
Returns the index of the new face.
:param list list_of_points: List of point indices\
making up the new face.
:return: Index of new face.
:rtype: int
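
        Example (illustrative)::

            # a triangular face built from point indices 0, 1 and 2
            face_index = mesh.add_face([0, 1, 2])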
"""
new_face_index = self.faces.add_entry(list_of_points)
if len(self.face_normals)-1 < new_face_index:
new_size = self._memory_extension(len(self.face_normals))
self.face_normals.resize((new_size, 3), refcheck=False)
if len(self.face_areas)-1 < new_face_index:
new_size = self._memory_extension(len(self.face_areas))
self.face_areas.resize(new_size, refcheck=False)
if len(self.face_real_centroids)-1 < new_face_index:
new_size = self._memory_extension(len(self.face_real_centroids))
self.face_real_centroids.resize((new_size, 3), refcheck=False)
if len(self.face_to_cell)-1 < new_face_index:
new_size = self._memory_extension(len(self.face_to_cell))
            self.face_to_cell.resize((new_size, 2), refcheck=False)
self.face_to_cell[new_face_index:, :] = -1
if self.has_face_shifted_centroid:
if len(self.face_shifted_centroids)-1 < new_face_index:
new_size = self._memory_extension(
len(self.face_shifted_centroids))
self.face_shifted_centroids.resize((new_size, 3),
refcheck=False)
return new_face_index
def set_face(self, face_index, points):
""" Sets a new set of points for a given face_index.
:param int face_index: Face index of face to be set.
:param list points: New list of points making up face.
:return: None
"""
self.faces[face_index] = points
def remove_from_face_to_cell(self, face_index, cell_index):
""" Removes the cell_index from face_to_cell map
        for face_index.
:param int face_index: Face index.
:param int cell_index: Cell index.
:return: None
"""
if self.face_to_cell[face_index, 0] == cell_index:
self.face_to_cell[face_index, 0] = -1
elif self.face_to_cell[face_index, 1] == cell_index:
self.face_to_cell[face_index, 1] = -1
else:
raise Exception("cell_index " + str(cell_index)+
" not found in face_to_cell for "+
str(face_index))
def add_to_face_to_cell(self, face_index, cell_index):
""" Adds cell_index to face_to_cell map
at face_index.
:param int face_index: The face index.
:param int cell_index: The cell index that will be associated\
with the face.
"""
if self.face_to_cell[face_index, 0] == -1:
self.face_to_cell[face_index, 0] = cell_index
elif self.face_to_cell[face_index, 1] == -1:
self.face_to_cell[face_index, 1] = cell_index
else:
raise Exception("cell_index " + str(cell_index)+
" could not be added to "+
str(face_index))
def duplicate_face(self, face_index):
""" Creates new face with all the properties
of the face_index, and adds the face to the
bottom of the face list. The function
returns the new face index.
:param int face_index: Face index to be duplicated.
:return: Face index of new duplicated face.
:rtype: int
"""
# Proper duplication requires duplicating
        # all the properties of the face.
new_index = self.add_face(self.get_face(face_index))
self.set_face_area(new_index, self.get_face_area(face_index))
return new_index
def get_face(self, face_index):
""" Given a face index, returns the
list of point indices that make
up the face.
:param int face_index: Face index.
:return: List of points making up face.
:rtype: ndarray('i')
"""
return self.faces[face_index]
def get_number_of_face_points(self, face_index):
""" Returns the number of points that make
up a given face.
:param int face_index: Face index.
:return: Number of point making up the face.
:rtype: int
"""
return len(self.faces[face_index])
def get_number_of_faces(self):
""" Returns the total number of faces
in the mesh. This corresponds to the
number of velocity degrees of freedom.
:return: Total number of faces in the mesh.
:rtype: int
"""
return self.faces.number_of_entries
def get_number_of_cell_faces(self, cell_index):
""" Returns the number of faces for cell_index
:param int cell_index: Cell index.
:return: Number of faces in cell.
:rtype: int.
"""
return len(self.cells[cell_index])
def get_face_to_cell(self, face_index):
""" Get list of cells connected with
face_index.
:param int face_index: Face index.
:return: List of cell indices connected to the face.
:rtype: list
"""
f_to_c = list(self.face_to_cell[face_index])
f_to_c = [x for x in f_to_c if x >= 0]
return f_to_c
# Sets the face shifted centroid to the intersection
# of the line joining the two face centroids and the
# face between them. Used for forcing a TPFA type
# method to the matrix.
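    # The intersection is a standard ray/plane computation: with cell
    # centroids c1 and c2, face centroid f and face normal n,
    #   v = (c2 - c1) / |c2 - c1|
    #   d = n . (f - c1) / (n . v)
    #   p = c1 + d*v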
def set_face_shifted_to_tpfa_all(self):
for face_index in range(len(self.faces)):
cells = self.get_face_to_cell(face_index)
if (len(cells)==2):
cent1 = self.get_cell_real_centroid(cells[0])
cent2 = self.get_cell_real_centroid(cells[1])
vector = cent2 - cent1
vector /= np.linalg.norm(vector)
d = np.dot((self.get_face_real_centroid(face_index) - cent1),
self.get_face_normal(face_index))
denom = np.dot(vector, self.get_face_normal(face_index))
d /= denom
intersection_point = d*vector+cent1
self.set_face_shifted_centroid(face_index, intersection_point)
else:
self.set_face_shifted_centroid(face_index,
self.get_face_real_centroid(face_index))
def is_line_seg_intersect_face(self, face_index, p1, p2):
""" Returns True if the line segment
intersects with a face.
:param int face_index: Face index.
        :param ndarray p1: Coordinates of the first point.
        :param ndarray p2: Coordinates of the second point.
        :return: True if the line segment intersects the face.
:rtype: bool
"""
vector = p2 - p1
vector /= np.linalg.norm(vector)
d = np.dot((self.get_face_real_centroid(face_index) - p1),
self.get_face_normal(face_index))
denom = np.dot(vector, self.get_face_normal(face_index))
if abs(denom) < 1e-10:
pass
else:
d /= denom
length = np.linalg.norm(p1-p2)
if d <= length+1.e-8 and d > 0.+1.e-8:
intersection_point = d*vector+p1
direction = np.zeros(len(self.get_face(face_index)))
normal = self.get_face_normal(face_index)
current_point = self.get_point(self.get_face(face_index)[-1])
for (local_index, next_point_index) in \
enumerate(self.get_face(face_index)):
next_point = self.get_point(next_point_index)
face_vec = next_point - current_point
check_vec = current_point - intersection_point
direction[local_index] = np.dot(np.cross(face_vec,
check_vec),
normal)
current_point = next_point
if (direction>0.).all():
return True
elif (direction<0.).all():
return True
else:
return False
def initialize_cells(self, number_of_cells):
""" Initialize cell data structure
for known number of cells.
"""
raise NotImplementedError
def load_mesh(self, input_file):
""" Loads mesh from mms file.
        :param file input_file: Mesh file (mms).
:return: None
"""
version = next(input_file)
date = next(input_file)
name = next(input_file)
comments = next(input_file)
next(input_file)
next(input_file)
for line in input_file:
line_split = line.split()
if line_split[0] == "POINTS":
number_of_points = int(line_split[1])
self.number_of_points = number_of_points
self.points = np.loadtxt(islice(input_file, number_of_points))
elif line_split[0] == "FACES":
number_of_faces = int(line_split[1])
self.faces.number_of_entries = number_of_faces
current_line = next(input_file)
n_data_entries = int(current_line)
self.faces.data = np.loadtxt(islice(input_file, n_data_entries),
dtype=np.dtype('i'))
current_line = next(input_file)
n_pointers = int(current_line)
self.faces.pointers = np.loadtxt(islice(input_file, n_pointers),
dtype=np.dtype('i'))
elif line_split[0] == "FACE_NORMALS":
number_of_faces = int(line_split[1])
self.face_normals = np.loadtxt(islice(input_file,
number_of_faces))
elif line_split[0] == "FACE_AREAS":
number_of_faces = int(line_split[1])
self.face_areas = np.loadtxt(islice(input_file,
number_of_faces))
elif line_split[0] == "FACE_REAL_CENTROIDS":
number_of_faces = int(line_split[1])
self.face_real_centroids = np.loadtxt(islice(input_file,
number_of_faces))
elif line_split[0] == "FACE_SHIFTED_CENTROIDS":
self.has_face_shifted_centroid = True
number_of_faces = int(line_split[1])
self.face_shifted_centroids = np.loadtxt(
islice(input_file, number_of_faces))
elif line_split[0] == "FACE_TO_CELL":
number_of_faces = int(line_split[1])
self.face_to_cell = np.loadtxt(
islice(input_file, number_of_faces))
elif line_split[0] == "CELLS":
number_of_cells = int(line_split[1])
self.cells.number_of_entries = number_of_cells
current_line = next(input_file)
n_data_entries = int(current_line)
self.cells.data = np.loadtxt(islice(input_file, n_data_entries),
dtype=np.dtype('i'))
current_line = next(input_file)
n_pointers = int(current_line)
self.cells.pointers = np.loadtxt(islice(input_file, n_pointers),
dtype=np.dtype('i'))
elif line_split[0] == "CELL_NORMAL_ORIENTATION":
number_of_cells = int(line_split[1])
self.cell_normal_orientation.number_of_entries = \
number_of_cells
current_line = next(input_file)
n_data_entries = int(current_line)
self.cell_normal_orientation.data = \
np.loadtxt(islice(input_file, n_data_entries),
dtype=np.dtype('i'))
current_line = next(input_file)
n_pointers = int(current_line)
self.cell_normal_orientation.pointers = \
np.loadtxt(islice(input_file, n_pointers),
dtype=np.dtype('i'))
elif line_split[0] == "CELL_VOLUMES":
number_of_entries = int(line_split[1])
self.cell_volume = np.loadtxt(islice(input_file,
number_of_entries))
elif line_split[0] == "CELL_REAL_CENTROIDS":
number_of_entries = int(line_split[1])
self.cell_real_centroid = np.loadtxt(islice(input_file,
number_of_entries))
elif line_split[0] == "CELL_SHIFTED_CENTROIDS":
number_of_entries = int(line_split[1])
self.cell_shifted_centroid = np.loadtxt(
islice(input_file, number_of_entries))
elif line_split[0] == "CELL_K":
number_of_cells = int(line_split[1])
self.cell_k = np.loadtxt(islice(input_file, number_of_cells))
elif line_split[0] == "BOUNDARY_MARKERS":
number_of_boundary_markers = int(line_split[1])
for line_index in range(number_of_boundary_markers):
current_line = next(input_file)
line_split = current_line.split()
entries = [int(x) for x in line_split]
boundary_marker = entries.pop(0)
self.add_boundary_marker(boundary_marker, "FROMFILE")
while entries:
self.add_boundary_face(boundary_marker,
entries.pop(0),
entries.pop(0))
elif line_split[0] == "DIRICHLET_BOUNDARY_POINTERS":
number_of_pointers = int(line_split[1])
for line_index in range(number_of_pointers):
current_line = next(input_file)
line_split = current_line.split()
key = int(line_split[0])
cell_index = int(line_split[1])
orientation = int(line_split[2])
self.set_dirichlet_face_pointer(key,
orientation,
cell_index)
elif line_split[0] == "INTERNAL_NO_FLOW":
number_of_faces = int(line_split[1])
for line_index in range(number_of_faces):
current_line = next(input_file)
line_split = current_line.split()
face_index = int(line_split[0])
orientation = int(line_split[1])
self.internal_no_flow.append([face_index, orientation])
elif line_split[0] == "FORCING_FUNCTION_POINTERS":
number_of_cells = int(line_split[1])
for line_index in range(number_of_cells):
current_line = next(input_file)
line_split = current_line.split()
cell_index = int(line_split[0])
entries = list(map(int, line_split[1:]))
face_list = []
orientation_list = []
while entries:
face_list.append(entries.pop(0))
orientation_list.append(entries.pop(0))
self.set_forcing_pointer(cell_index,
face_list,
orientation_list)
elif line_split[0] == "FACE_TO_LAGRANGE_POINTERS":
number_of_pointers = int(line_split[1])
for line_index in range(number_of_pointers):
current_line = next(input_file)
line_split = current_line.split()
face_index = int(line_split[0])
lagrange_index = int(line_split[1])
orientation = int(line_split[2])
self.set_face_to_lagrange_pointer(face_index,
orientation,
lagrange_index)
elif line_split[0] == "LAGRANGE_TO_FACE_POINTERS":
number_of_pointers = int(line_split[1])
for line_index in range(number_of_pointers):
current_line = next(input_file)
line_split = current_line.split()
lagrange_index = int(line_split[0])
face_index = int(line_split[1])
orientation = int(line_split[2])
self.set_lagrange_to_face_pointers(lagrange_index,
face_index,
orientation)
def save_cell(self, cell_index, output_file):
""" Saves individual cell in mms format.
:param int cell_index: Cell index.
:param file output_file: File to output cell to.
:return: None
"""
glob_to_loc_points = {}
temp_mesh = self.__class__()
current_cell = []
current_cell_orientations = []
for (face_index, orientation) in zip(self.get_cell(cell_index),
self.get_cell_normal_orientation(cell_index)):
current_face = []
for point_index in self.get_face(face_index):
if point_index in glob_to_loc_points:
current_face.append(glob_to_loc_points[point_index])
else:
current_point = self.get_point(point_index)
local_index = temp_mesh.add_point(current_point)
glob_to_loc_points[point_index] = local_index
current_face.append(local_index)
new_face_index = temp_mesh.add_face(current_face)
temp_mesh.set_face_area(new_face_index,
self.get_face_area(face_index))
temp_mesh.set_face_normal(new_face_index,
self.get_face_normal(face_index))
current_centroid = self.get_face_real_centroid(face_index)
temp_mesh.set_face_real_centroid(new_face_index, current_centroid)
current_cell.append(new_face_index)
current_cell_orientations.append(orientation)
temp_mesh.add_cell(current_cell, current_cell_orientations)
temp_mesh.set_cell_k(0, self.get_cell_k(cell_index))
temp_mesh.set_cell_volume(0, self.get_cell_volume(cell_index))
current_centroid = self.get_cell_real_centroid(cell_index)
temp_mesh.set_cell_real_centroid(0, current_centroid)
temp_mesh.save_mesh(output_file)
def save_mesh(self, output_file):
""" Saves mesh file in mms format.
:param file output_file: File to save mesh to.
"""
output_file.write(b"this is just a test\n")
output_file.write(tb(str(mimpy.__version__)+"\n"))
output_file.write(b"date\n")
output_file.write(b"name\n")
output_file.write(b"comments\n")
output_file.write(b"#\n")
output_file.write(b"#\n")
## Points
output_file.write(b"POINTS ")
output_file.write(tb(str(len(self.points))+"\n"))
np.savetxt(output_file, self.points)
## Faces
output_file.write(b"FACES ")
output_file.write(tb(str(self.get_number_of_faces())+"\n"))
output_file.write(tb(str(len(self.faces.data))+"\n"))
np.savetxt(output_file, self.faces.data, fmt='%i')
output_file.write(tb(str(len(self.faces.pointers))+"\n"))
np.savetxt(output_file, self.faces.pointers, fmt="%i %i")
output_file.write(b"FACE_NORMALS ")
output_file.write(tb(str(len(self.face_normals))+"\n"))
np.savetxt(output_file, self.face_normals)
output_file.write(b"FACE_AREAS ")
output_file.write(tb(str(self.get_number_of_faces())+"\n"))
for face_index in range(self.get_number_of_faces()):
output_file.write(tb(str((self.get_face_area(face_index)))+"\n"))
output_file.write(b"FACE_REAL_CENTROIDS ")
output_file.write(tb(str(self.get_number_of_faces())+"\n"))
for face_index in range(self.get_number_of_faces()):
current_centroid = self.get_face_real_centroid(face_index)
output_file.write(tb(str(current_centroid[0])+" "))
output_file.write(tb(str(current_centroid[1])+" "))
output_file.write(tb(str(current_centroid[2])+"\n"))
        if self.has_face_shifted_centroid:
            output_file.write(b"FACE_SHIFTED_CENTROIDS ")
            output_file.write(tb(str(self.get_number_of_faces())+"\n"))
            for face_index in range(self.get_number_of_faces()):
                current_centroid = self.get_face_shifted_centroid(face_index)
                output_file.write(tb(str(current_centroid[0])+" "))
                output_file.write(tb(str(current_centroid[1])+" "))
                output_file.write(tb(str(current_centroid[2])+"\n"))
output_file.write(b"FACE_TO_CELL ")
output_file.write(tb(str(len(self.face_to_cell))+"\n"))
np.savetxt(output_file, self.face_to_cell, fmt="%i %i")
output_file.write(b"CELLS ")
output_file.write(tb(str(self.get_number_of_cells())+"\n"))
output_file.write(tb(str(len(self.cells.data))+"\n"))
np.savetxt(output_file, self.cells.data, fmt='%i')
output_file.write(tb(str(len(self.cells.pointers))+"\n"))
np.savetxt(output_file, self.cells.pointers, fmt="%i %i")
output_file.write(b"CELL_NORMAL_ORIENTATION ")
output_file.write(tb(str(self.get_number_of_cells())+"\n"))
output_file.write(tb(str(len(self.cell_normal_orientation.data))+"\n"))
np.savetxt(output_file,
self.cell_normal_orientation.data, fmt='%i')
output_file.write(
tb(str((len(self.cell_normal_orientation.pointers)))+"\n"))
np.savetxt(output_file,
self.cell_normal_orientation.pointers,
fmt="%i %i")
output_file.write(b"CELL_VOLUMES ")
output_file.write(tb(str(len(self.cell_volume))+"\n"))
np.savetxt(output_file, self.cell_volume)
output_file.write(b"CELL_REAL_CENTROIDS ")
output_file.write(tb(str(len(self.cell_real_centroid))+"\n"))
np.savetxt(output_file, self.cell_real_centroid)
if self.has_cell_shifted_centroid:
output_file.write(b"CELL_SHIFTED_CENTROIDS ")
output_file.write(tb(str(len(self.cell_shifted_centroid))+"\n"))
            np.savetxt(output_file, self.cell_shifted_centroid)
output_file.write(b"CELL_K ")
output_file.write(tb(str(len(self.cell_k))+"\n"))
np.savetxt(output_file, self.cell_k)
output_file.write(b"BOUNDARY_MARKERS ")
output_file.write(tb(str(len(self.boundary_markers))+"\n"))
for marker_index in self.boundary_markers:
output_file.write(tb(str(marker_index)+" "))
for (face_index, face_orientation) in\
self.get_boundary_faces_by_marker(marker_index):
output_file.write(
tb(str(face_index)+" "+str(face_orientation)+" "))
output_file.write(b"\n")
output_file.write(b"DIRICHLET_BOUNDARY_POINTERS ")
output_file.write(tb(str(len(list(
self.dirichlet_boundary_pointers.keys())))+"\n"))
for key in self.dirichlet_boundary_pointers:
cell_index, orientation = self.dirichlet_boundary_pointers[key]
output_file.write(
tb(str(key)+" "+str(cell_index)+" "+str(orientation)+"\n"))
output_file.write(b"INTERNAL_NO_FLOW ")
output_file.write(tb(str(len(self.internal_no_flow))+"\n"))
for [face_index, orientation] in self.internal_no_flow:
output_file.write(tb(str(face_index)+" "+str(orientation)+"\n"))
output_file.write(b"FORCING_FUNCTION_POINTERS ")
output_file.write(
tb(str(len(list(self.forcing_function_pointers.keys())))+"\n"))
for cell_index in self.forcing_function_pointers:
output_file.write(tb(str(cell_index)+" "))
for face_index, orientation in \
self.forcing_function_pointers[cell_index]:
output_file.write(tb(str(face_index)+" "+str(orientation)+" "))
output_file.write(b"\n")
output_file.write(b"FACE_TO_LAGRANGE_POINTERS ")
output_file.write(
tb(str(len(list(self.face_to_lagrange_pointers.keys())))+"\n"))
for key in self.face_to_lagrange_pointers:
lagrange_index, orientation = self.face_to_lagrange_pointers[key]
output_file.write(
tb(str(key)+" "+str(lagrange_index)+" "+str(orientation)+"\n"))
output_file.write(b"LAGRANGE_TO_FACE_POINTERS ")
output_file.write(
tb(str(len(list(self.lagrange_to_face_pointers.keys())))+"\n"))
for key in self.lagrange_to_face_pointers:
face_index, orientation = self.lagrange_to_face_pointers[key]
output_file.write(
tb(str(key)+" "+str(face_index)+" "+str(orientation)+"\n"))
output_file.close()
def set_cell_faces(self, cell_index, faces):
""" Sets the cell faces.
:param int cell_index: Cell index.
:param list faces: Faces making up cell.
:return: None
"""
self.cells[cell_index] = faces
for face_index in faces:
if cell_index not in self.face_to_cell[face_index]:
self.add_to_face_to_cell(face_index, cell_index)
def set_cell_orientation(self, cell_index, orientation):
""" Sets the cell orientation of faces.
:param int cell_index: Cell index.
        :param list orientation: List of new cell face orientations.
:return: None
"""
self.cell_normal_orientation[cell_index] = orientation
def add_cell(self,
list_of_faces,
list_of_orientations):
""" Adds a new cell to the mesh. A cell is represented
by a list of face indices. The function also
takes in a list of orientations of the same length
as the list_of_faces. These represent the direction
        of the face normals relative to the cell: (1) points out,
        (-1) points in.
Returns the index of the new cell.
:param list list_of_faces: List of face indices making up new cell.
:param list list_of_orientations: List consisting of 1s and -1s\
indicating whether normals are pointing out (1) or in (-1) of cell.
:return: New cell index.
:rtype: int
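
        Example (illustrative)::

            # a hexahedral cell bounded by faces 0..5, with every face
            # normal pointing out of the cell
            cell_index = mesh.add_cell([0, 1, 2, 3, 4, 5], [1]*6)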
"""
new_cell_index = self.cells.add_entry(list_of_faces)
self.cell_normal_orientation.add_entry(list_of_orientations)
if len(self.cell_volume)-1<new_cell_index:
new_size = self._memory_extension(len(self.cell_volume))
self.cell_volume.resize(new_size, refcheck=False)
if len(self.cell_k)-1<new_cell_index:
new_size = self._memory_extension(len(self.cell_k))
self.cell_k.resize((new_size, 9), refcheck=False)
for face_index in list_of_faces:
if self.face_to_cell[face_index][0] == -1:
self.face_to_cell[face_index][0] = new_cell_index
elif self.face_to_cell[face_index][1] == -1:
self.face_to_cell[face_index][1] = new_cell_index
else:
raise Exception("setting face "+str(face_index)+" to cell "+
str(new_cell_index)+" already set"+
" to two cells "+
str(self.face_to_cell[face_index]))
if len(self.cell_domain)-1<new_cell_index:
new_size = self._memory_extension(len(self.cell_domain))
self.cell_domain.resize(new_size, refcheck=False)
if len(self.cell_real_centroid)-1<new_cell_index:
new_size = self._memory_extension(len(self.cell_real_centroid))
            self.cell_real_centroid.resize((new_size, 3), refcheck=False)
if self.has_alpha:
self.cell_alpha.append(None)
if self.has_cell_shifted_centroid:
if len(self.cell_shifted_centroid)-1<new_cell_index:
new_size = self._memory_extension(
len(self.cell_shifted_centroid))
                self.cell_shifted_centroid.resize((new_size, 3), refcheck=False)
return new_cell_index
def get_cell(self, cell_index):
""" Given a cell_index, it returns the list of faces
that make up that cell.
:param int cell_index: Cell index of interest.
:return: List of faces making up cell.
:rtype: list
"""
return self.cells[cell_index]
def get_cell_normal_orientation(self, cell_index):
""" Given a cell index, returns a list of face
orientations for that cell.
:param int cell_index: Index of cell.
:return: List of faces orientations in cell. The\
list is made up of 1s and -1s, 1 if the corresponding\
face normal is pointing out of the cell, and -1 if the\
corresponding face normal is pointing into the cell.
:rtype: list
"""
return self.cell_normal_orientation[cell_index]
def get_number_of_cells(self):
""" Returns total number of cells in mesh.
:return: Number of cells in mesh.
:rtype: int
"""
return len(self.cells)
def set_cell_real_centroid(self, cell_index, centroid):
""" Sets the array of the cell centroid.
:param int cell_index: Index of cell.
:param ndarray centroid: New cell centroid.
:return: None
"""
self.cell_real_centroid[cell_index] = centroid
def get_cell_real_centroid(self, cell_index):
""" Returns array of the cell centroid
"""
return self.cell_real_centroid[cell_index]
def get_all_cell_real_centroids(self):
""" Returns list of all cell centroids.
:return: List of all the cell centroids.
:rtype: ndarray
"""
return self.cell_real_centroid[:self.get_number_of_cells()]
def get_all_cell_shifted_centroids(self):
""" Returns list of all cell centroids.
:return: List of all shifted cell centroid.
:rtype: ndarray
"""
return self.cell_shifted_centroid[:self.get_number_of_cells()]
def set_cell_shifted_centroid(self, cell_index, centroid):
""" Sets the shifted centroid for cell_index.
:param int cell_index: Index of cell.
:param ndarray centroid: Shifted centroid point.
:return: None
"""
self.cell_shifted_centroid[cell_index] = centroid
def use_face_shifted_centroid(self):
""" Sets whether a shifted face centroid will be used
for mesh.
"""
self.has_face_shifted_centroid = True
def is_using_face_shifted_centroid(self):
""" Returns if shifted face centroids are used
and set in mesh.
:return: Whether face shifted centroids are set
and used.
:rtype: bool
"""
return self.has_face_shifted_centroid
def use_cell_shifted_centroid(self):
""" Sets whether a shifted cell centroid will be used
for mesh.
"""
self.has_cell_shifted_centroid = True
def is_using_cell_shifted_centroid(self):
""" Returns if shifted face centroids are used
and set in mesh.
:return: Whether cell shifted centroids are set
and used.
:rtype: bool
"""
return self.has_cell_shifted_centroid
def get_cell_shifted_centroid(self, cell_index):
""" Returns the shifted cell centroid for cell_index.
:param int cell_index: Index of cell.
:return: Cell shifted point.
:rtype: ndarray
"""
return self.cell_shifted_centroid[cell_index]
def set_cell_volume(self, cell_index, volume):
""" Sets cell volume for cell_index.
:param int cell_index: Index of cell.
:param float volume: New volume to be set for cell.
:return: None
"""
self.cell_volume[cell_index] = volume
def get_cell_volume(self, cell_index):
""" Returns cell volume for cell_index.
"""
return self.cell_volume[cell_index]
def set_cell_k(self, cell_index, k):
""" Set cell permeability tensor K
(Numpy matrix) for cell_index.
"""
self.cell_k[cell_index] = k.reshape((1, 9))
def get_cell_k(self, cell_index):
""" Return permeability tensor k
(Numpy matrix) for cell_index.
"""
return self.cell_k[cell_index].reshape((3, 3))
def get_all_k_entry(self, i, j):
""" Returns a list of all K[i, j].
"""
return self.cell_k[:self.get_number_of_cells(), i*3+j]
def get_all_k(self):
""" Returns a list of all cell
permeability tensors.
"""
return self.cell_k
def use_alpha(self):
""" Activates the ability to set the
alpha parameter for each cell.
"""
self.has_alpha = True
def set_alpha_by_cell(self, alpha, cell_index):
""" Set alpha (float) for cell_index.
"""
self.cell_alpha[cell_index] = alpha
def get_alpha_by_cell(self, cell_index):
""" Returns alpha (float) for cell_index.
"""
return self.cell_alpha[cell_index]
def set_face_real_centroid(self, face_index, centroid):
""" Sets face centroid for face_index.
"""
self.face_real_centroids[face_index] = centroid
def get_face_real_centroid(self, face_index):
""" Returns face centroid.
"""
return self.face_real_centroids[face_index]
def set_face_shifted_centroid(self, face_index, centroid):
""" Sets face shifted centroid.
"""
self.face_shifted_centroids[face_index] = centroid
def get_face_shifted_centroid(self, face_index):
""" Return face shifted centroid coordinates.
"""
return self.face_shifted_centroids[face_index]
def set_face_area(self, face_index, area):
""" Sets face area (float) for face_index.
"""
self.face_areas[face_index] = area
def get_face_area(self, face_index):
""" Return area of face.
"""
return self.face_areas[face_index]
def set_face_normal(self, face_index, normal):
""" Set face normal (array) to face_index.
"""
self.face_normals[face_index] = normal
def get_face_normal(self, face_index):
""" Return face normal for face_index.
"""
return self.face_normals[face_index]
def set_boundary_markers(self, boundary_markers, boundary_descriptions):
""" Initialize the mesh boundary labeling. Each marker
can represent a single boundary face or a group
of faces.
boundary_markers: List of integers.
boundary_descriptions: List of strings describing
the face groups.
"""
self.boundary_markers = boundary_markers
self.boundary_descriptions = boundary_descriptions
for marker in boundary_markers:
self.boundary_faces[marker] = []
def add_boundary_marker(self, boundary_marker, boundary_description):
""" Add a new boundary marker.
:param int boundary_marker: New boundary marker index.
:param str boundary_description: Text describing
the boundary marker.
"""
self.boundary_markers.append(boundary_marker)
self.boundary_descriptions.append(boundary_description)
self.boundary_faces[boundary_marker] = []
def create_new_boundary_marker(self, boundary_description):
""" Creates new boundary marker and assures
that the index is unique. Returns the
index of the new boundary marker.
"""
new_index = len(self.boundary_markers)
self.boundary_markers.append(new_index)
self.boundary_descriptions.append(boundary_description)
self.boundary_faces[new_index] = []
return new_index
def has_boundary_marker(self, boundary_marker):
""" Returns true if boundary_marker exists.
"""
return boundary_marker in self.boundary_markers
def get_boundary_markers(self):
""" Returns a list of all boundary markers.
"""
return self.boundary_markers
def get_boundary_description(self, boundary_marker):
""" Returns the boundary description for
boundary_marker.
"""
return self.boundary_descriptions[boundary_marker]
def add_boundary_face(self,
boundary_marker,
face_index,
face_orientation):
""" Assign face_index to a certain boundary_marker grouping.
the face_orientation indicates whether the normal of that
face points in (-1) or out (1) of the cell the face
belongs to.
A face should never be associated with more than one marker.
:param int boundary_marker: Boundary marker index.
:param int face_index: Index of face.
:param int face_orientation: Orientation of face normal\
        relative to the domain. (1) if pointing out, (-1)\
        if pointing in.
:return: None
"""
self.boundary_faces[boundary_marker].append([face_index,
face_orientation])
def set_boundary_faces(self,
boundary_marker,
face_orientation_list):
""" Takes a boundary_marker index, and sets the entire list
of tuples for that boundary marker.
:param int boundary_marker: Boundary marker to be set.
        :param list face_orientation_list: A list of tuples of the form\
[face_index, orientation] to be associated with the\
indicated boundary marker.
:return: None
"""
self.boundary_faces[boundary_marker] = face_orientation_list
def get_boundary_faces_by_marker(self, boundary_marker):
""" Returns a list of all the faces associated with a boundary_marker.
:param int boundary_marker: Boundary marker index.
        :return: List of tuples [face_index, orientation] associated with\
boundary_marker.
:rtype: list
"""
return self.boundary_faces[boundary_marker]
def is_boundary_face(self, face_index, markers):
""" Returns True if face_index belongs to
any of the markers.
"""
for boundary_marker in markers:
for face in self.boundary_faces[boundary_marker]:
if face_index == face[0]:
return True
return False
def find_boundary_marker(self, face_index, markers):
""" Returns the boundary marker containing
face_index.
"""
for boundary_marker in markers:
for face in self.boundary_faces[boundary_marker]:
if face_index == face[0]:
return boundary_marker
def set_boundary_face_orientation(self, face_index, new_orientation):
""" Set orientation for face_index.
"""
for bm in self.boundary_markers:
for face in self.boundary_faces[bm]:
if face_index == face[0]:
face[1] = new_orientation
def get_number_of_boundary_faces(self):
""" Returns number of faces on the boundary
of the domain.
"""
number_of_boundary_faces = 0
for boundary_marker in self.boundary_markers:
number_of_boundary_faces += \
len(self.boundary_faces[boundary_marker])
return number_of_boundary_faces
def add_internal_no_flow(self, face_index, face_orientation):
""" Sets face as interior no flow boundary condition.
:param int face_index: Face index to be set as internal no-flow.
:param int face_orientation: Orientation of face relative to the\
domain, (1) for pointing out, (-1) for pointing in.
"""
self.internal_no_flow.append([face_index, face_orientation])
def get_internal_no_flow(self):
""" Returns list of faces set as
internal no flow condition.
"""
return self.internal_no_flow
def set_dirichlet_face_pointer(self,
face_index,
face_orientation,
cell_index):
""" Sets the value of a Dirichlet boundary to
value of cell pressure for cell_index.
This approach is used for coupling fractures
with a reservoir.
"""
# The function adds a zero entry to the
# dirichlet_boundary_values dict. This
# allows the MFD code to build the matrix
        # correctly, and doesn't affect the right-hand
# side of the problem.
self.dirichlet_boundary_pointers[face_index] = \
(cell_index, face_orientation)
def get_dirichlet_pointer_faces(self):
""" Returns all the faces with Dirichlet
values set by pointing to a cell.
"""
return list(self.dirichlet_boundary_pointers.keys())
def set_face_to_lagrange_pointer(self,
face_index,
face_orientation,
lagrange_index):
""" Sets face to dirichlet type boundary pointing to
lagrange multiplier.
"""
# The function adds a zero entry to the
# dirichlet_boundary_values dict. This
# allows the MFD code to build the matrix
        # correctly, and doesn't affect the right-hand
# side of the problem.
self.face_to_lagrange_pointers[face_index] = \
(lagrange_index, face_orientation)
def get_all_face_to_lagrange_pointers(self):
""" Returns all face indices that are
pointing to a lagrange multiplier.
"""
return list(self.face_to_lagrange_pointers.keys())
def get_face_to_lagrange_pointer(self, face_index):
""" Returns the lagrange multiplier index
and the face normal orientation.
"""
return self.face_to_lagrange_pointers[face_index]
def set_lagrange_to_face_pointers(self,
lagrange_index,
face_index,
orientation):
""" Sets the lagrange multiplier to the source faces
in order to impose zero flux across the boundary.
"""
self.lagrange_to_face_pointers[lagrange_index] = \
            (face_index, orientation)
def get_all_lagrange_to_face_pointers(self):
""" Returns all lagrange face indices that
point to fluxes.
"""
return list(self.lagrange_to_face_pointers.keys())
def get_lagrange_to_face_pointers(self, lagrange_index):
""" Returns the faces the lagrange_index face
points too.
"""
return self.lagrange_to_face_pointers[lagrange_index]
def get_dirichlet_pointer(self, face_index):
""" Returns the cell_index for
which the Dirichlet boundary will be set
implicitly.
"""
return self.dirichlet_boundary_pointers[face_index]
def set_periodic_boundary(self,
face_index_1,
face_orientation_1,
face_index_2,
face_orientation_2):
""" Sets a periodic boundary condition, connecting
        face 1 with face 2. This amounts to creating a single
lagrange multiplier shared by the faces. The MFD class would
impose continuity of both the pressure and the flux
for the periodic conditions.
"""
lagrange_index_1 = self.duplicate_face(face_index_1)
self.periodic_boundaries.append((face_index_1,
face_orientation_1,
face_index_2,
face_orientation_2,
lagrange_index_1,))
def set_forcing_pointer(self,
cell_index,
face_indices,
face_orientations):
""" Sets the value of the forcing function
        implicitly as the sum of the fluxes from a list
of faces. This approach is used for coupling
fractures with a reservoir.
"""
# The function adds a zero entry to the
# cell_forcing_function dict. This
# allows the MFD code to build the matrix
        # correctly, and doesn't affect the right-hand
# side of the problem.
# If the forcing function is set later on
# (in case of well for example), it
# becomes additive to the source term
# for that cell.
self.forcing_function_pointers[cell_index] = \
list(zip(face_indices, face_orientations))
def get_forcing_pointer_cells(self):
""" Returns cell indices with forcing function
pointers.
"""
return list(self.forcing_function_pointers.keys())
def get_forcing_pointers_for_cell(self, cell_index):
""" Returns list of pointers (face_indices)
for cell_index.
"""
return self.forcing_function_pointers[cell_index]
def set_cell_domain(self, cell_index, domain):
""" Sets cell domain identifier
for cell_index.
"""
self.cell_domain[cell_index] = domain
self.cell_domain_tags.add(domain)
def get_domain_tags(self):
""" Returns list of all domain tags.
"""
return list(self.cell_domain_tags)
def get_cell_domain(self, cell_index):
""" Returns cell domain identifier
for cell_index.
"""
return self.cell_domain[cell_index]
def get_cell_domain_all(self):
""" Returns list containing
all cell_domain tags.
"""
return self.cell_domain[:self.get_number_of_cells()]
def get_cells_in_domain(self, domain):
""" Returns all cells with domain tag.
"""
cells_in_domain = []
for cell_index in range(self.get_number_of_cells()):
if self.cell_domain[cell_index] == domain:
cells_in_domain.append(cell_index)
return cells_in_domain
def set_gravity_vector(self, gravity_vector):
""" Set vector indicating gravity acceleration direction.
"""
self.gravity_vector = gravity_vector
def get_gravity_vector(self):
""" Returns gravity vector (down direction)
"""
return self.gravity_vector
def get_gravity_acceleration(self):
""" Returns the gravity acceleration constant.
"""
return self.gravity_acceleration
def find_basis_for_face(self, face_index):
""" Finds two non collinear vectors
in face to serve as basis for plane.
"""
face = self.get_face(face_index)
for i in range(len(face)):
            v1 = self.get_point(face[(i+1) % len(face)]) - self.get_point(face[i])
            v2 = self.get_point(face[i]) - self.get_point(face[i-1])
v2 /= np.linalg.norm(v2)
v1 /= np.linalg.norm(v1)
if 1.-abs(v1.dot(v2)) > 1.e-6:
return (v1, v2, face[i])
raise Exception("Couldn't compute basis for face " + str(face_index))
def find_face_normal(self, face_index):
""" Finds the face normal based on
rotation around the face boundary.
Assumes the face is planar.
"""
face = self.get_face(face_index)
for i in range(len(face)):
            v1 = self.get_point(face[(i+1) % len(face)]) - self.get_point(face[i])
            v2 = self.get_point(face[i]) - self.get_point(face[i-1])
new_face_normal = np.cross(v2, v1)
if np.linalg.norm(new_face_normal) >1.e-10:
new_face_normal /= np.linalg.norm(new_face_normal)
return new_face_normal
raise Exception("Couldn't compute normal for face " + str(face_index))
def find_centroid_for_coordinates(self, face_index, coordinates):
""" Computes centroid calculation for a 3D polygon based on
two coordinates of the polygon.
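        # Standard shoelace centroid of a simple polygon, restricted to the
        # two chosen coordinates (u, v):
        #   A   = 1/2 * sum_i (u_i*v_{i+1} - u_{i+1}*v_i)
        #   C_u = 1/(6A) * sum_i (u_i + u_{i+1}) * (u_i*v_{i+1} - u_{i+1}*v_i)
        #   C_v = 1/(6A) * sum_i (v_i + v_{i+1}) * (u_i*v_{i+1} - u_{i+1}*v_i)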
"""
C_1 = 0.
C_2 = 0.
area = 0.
index_1 = coordinates[0]
index_2 = coordinates[1]
current_face = self.get_face(face_index)
for index in range(len(current_face)):
current_point = self.get_point(current_face[index])
if index == len(current_face)-1:
next_point = self.get_point(current_face[0])
else:
next_point = self.get_point(current_face[index+1])
C_1 += ((current_point[index_1]+next_point[index_1])*
(current_point[index_1]*next_point[index_2]-
next_point[index_1]*current_point[index_2]))
C_2 += ((current_point[index_2]+next_point[index_2])*
(current_point[index_1]*next_point[index_2]-
next_point[index_1]*current_point[index_2]))
area += current_point[index_1]*next_point[index_2]
area -= next_point[index_1]*current_point[index_2]
area /= 2.
C_1 /= 6.*area
C_2 /= 6.*area
return (area, C_1, C_2)
def find_face_centroid(self, face_index):
""" Returns centroid coordinates for face_index.
        This function assumes planarity of the face
and is currently intended for use with three dimensional
meshes.
The function returns the area of the face, as well
as the x, y, z coordinates of its center.
"""
(v1, v2, origin_index) = self.find_basis_for_face(face_index)
polygon = [np.array(self.get_point(x))
for x in self.get_face(face_index)]
assert(np.linalg.norm(v2) >1.e-12)
assert(np.linalg.norm(v1) >1.e-12)
v1 = v1/np.linalg.norm(v1)
v_temp = np.cross(v1, v2)
v2 = np.cross(v_temp, v1)
if | np.linalg.norm(v2) | numpy.linalg.norm |
import os
import h5py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def countUniqueVal(dataframe, column):
""" Print unique values for each columns
"""
for count, name in enumerate(column):
print("#{} - {}".format(count, name))
print(dataframe[name].value_counts())
print("\n")
return
def dataloader(filename, save_description=False, print_description=False):
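    """ Load one HDF5 flight-data file from the local "Dataset" folder and
    return its arrays. The key layout below follows the N-CMAPSS dataset
    convention (W: operating conditions, X_s: measured signals, X_v: virtual
    sensors, T: health parameters, Y: RUL labels, A: auxiliary data).
    """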
dirname = "Dataset"
filepath = os.path.normpath(os.path.join(os.path.join(os.getcwd(), dirname), filename))
with h5py.File(filepath, "r") as hdf:
print("Keys: {}".format(hdf.keys()))
# Development set
W_dev = np.array(hdf.get('W_dev')) # W - operative condition
X_s_dev = np.array(hdf.get('X_s_dev')) # X_s - measured signal
X_v_dev = np.array(hdf.get('X_v_dev')) # X_v - virtual sensors
T_dev = np.array(hdf.get('T_dev')) # T - engine health parameters
Y_dev = np.array(hdf.get('Y_dev')) # RUL - RUL label
        A_dev = np.array(hdf.get('A_dev'))           # Auxiliary - unit number u, flight cycle number c, flight class Fc and health state h_s
# Test set
W_test = np.array(hdf.get('W_test')) # W
X_s_test = np.array(hdf.get('X_s_test')) # X_s
X_v_test = np.array(hdf.get('X_v_test')) # X_v
T_test = np.array(hdf.get('T_test')) # T
Y_test = np.array(hdf.get('Y_test')) # RUL
A_test = np.array(hdf.get('A_test')) # Auxiliary
# Varnams
W_var = np.array(hdf.get('W_var'))
X_s_var = np.array(hdf.get('X_s_var'))
X_v_var = np.array(hdf.get('X_v_var'))
T_var = np.array(hdf.get('T_var'))
A_var = np.array(hdf.get('A_var'))
        # convert np.array of byte strings to lists of unicode strings (dtype U20)
W_var = list(np.array(W_var, dtype='U20'))
X_s_var = list( | np.array(X_s_var, dtype='U20') | numpy.array |
"""
Utilities about Boolean networks.
"""
import numpy as np
from ortools.graph import pywrapgraph
class BooleanNetwork:
"""
    A Boolean network model for gene regulatory networks.
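
    Example (illustrative sketch; two mutually repressing genes)::

        f0 = lambda s: 1 - s[1]
        f1 = lambda s: 1 - s[0]
        bn = BooleanNetwork([f0, f1])
        bn.async_simulate([1, 0])  # -> {(1, 0)}, a fixed point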
"""
def __init__(self, update_functions):
"""
Initialize a Boolean network with the given Boolean update functions for each gene.
:param update_functions: iterable, each item is a Boolean update function. Requirements: (1) accept a single 1d
        binary array input representing a state vector; (2) return a binary value denoting the new state of a gene.
"""
self._n_genes = len(update_functions)
self._fs = list(update_functions)
def async_simulate(self, initial_states):
"""
Simulate the Boolean network model asynchronously starting from the given initial state.
        :param initial_states: array-like, which contains 0 or 1 for each gene
We may provide one initial state in a 1d array / list, or multiple initial states in a 2d array, where each
row denotes an initial state.
:return: set, representing the model state space reachable from the initial state with the asynchronous update
strategy, where each item is a binary state.
In an asynchronous update scheme, at most one gene is updated between two consecutive states.
"""
        # use depth-first search here
initial_states = np.array(initial_states)
if initial_states.ndim == 1: # only one initial state
assert len(initial_states) == self.n_genes
initial_states = np.reshape(initial_states, (-1, len(initial_states)))
else:
assert initial_states.shape[1] == self.n_genes
q = list(initial_states)
model_space = set(tuple(s) for s in initial_states)
while q:
old_state = q.pop()
for i in range(self._n_genes): # if the ith target_gene is to be updated
new_state = | np.copy(old_state) | numpy.copy |
from timeit import default_timer as timer
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
import scipy.optimize
from scipy.special import expit, xlog1py
from experiments import experimental_design
from performance_metrics import performance_metrics
class CSLogit:
def __init__(self, initial_theta, lambda1=0, lambda2=0, theta_opt=None, obj='aec'):
self.initial_theta = initial_theta
self.lambda1 = lambda1
self.lambda2 = lambda2
self.theta_opt = theta_opt
self.obj = obj
def fitting(self, x, y, cost_matrix):
if self.obj == 'weightedce':
misclass_cost = np.zeros(len(y))
misclass_cost[y == 0] = cost_matrix[:, 1, 0][y == 0]
misclass_cost[y == 1] = cost_matrix[:, 0, 1][y == 1]
def get_obj_func(theta):
if self.obj == 'ce':
return self.ce(theta, x, y)
if self.obj == 'ce10':
return self.ce10(theta, x, y)
elif self.obj == 'weightedce':
return self.weighted_ce(theta, x, y, misclass_cost)
elif self.obj == 'aec':
return self.aec(theta, x, y, cost_matrix)
elif self.obj == 'logaec':
return self.logaec(theta, x, y, cost_matrix)
else:
raise Exception('No correct loss specified')
self.theta, func_min, _ = self.optimization(get_obj_func, self.initial_theta)
return func_min
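    # Illustrative usage sketch (names and shapes assumed, not from the
    # original code):
    #   n, p = x.shape
    #   clf = CSLogit(initial_theta=np.zeros(p + 1), obj='ce')
    #   clf.fitting(x, y, cost_matrix)  # cost_matrix has shape (n, 2, 2)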
def optimization(self, obj_func, initial_theta):
opt_res = scipy.optimize.minimize(obj_func, initial_theta, method='L-BFGS-B',
options={'ftol': 1e-6, 'disp': False})
theta_opt, func_min, n_iter = opt_res.x, opt_res.fun, opt_res.nfev
self.theta_opt = theta_opt
return theta_opt, func_min, n_iter
def ce(self, theta, x, y_true):
scores = expit(theta[0] + x.dot(theta[1:]))
eps = 1e-9 # small value to avoid log(0)
ce = - (y_true * np.log(scores + eps) + (1 - y_true) * np.log(1 - scores + eps))
# Add regularization
ce += self.lambda1 * np.sum(abs(theta[1:])) + self.lambda2 * | np.sum(theta[1:] ** 2) | numpy.sum |
import cv2
import foolbox
import foolbox.attacks as fa
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
from foolbox.distances import Linf
from matplotlib import rcParams
from utils import utils
CLASS_NAMES = [
"tench",
"English springer",
"cassette player",
"chain saw",
"church",
"French horn",
"garbage truck",
"gas pump",
"golf ball",
"parachute",
]
model = utils.load_trained_model(
model_name="resnet",
model_path="../models/200224_0901_resnet_imagenette.pth",
class_num=len(CLASS_NAMES),
)
preprocessing = dict(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], axis=-3
)
# resize image to size 213 * 213
transform = transforms.Compose(
[transforms.Resize((213, 213)), transforms.ToTensor()]
)
# load dataset with validation images
dataset = torchvision.datasets.ImageFolder(
root="../data/imagenette2-160/val", transform=transform
)
# select a single validation image (index 431)
dataset = torch.utils.data.Subset(dataset, [431])
# compose dataset into dataloader
# (no shuffling needed; we're not training)
dataset_loader = torch.utils.data.DataLoader(dataset, batch_size=1)
# get dataset size (length)
dataset_size = len(dataset)
# Use GPU if available
if torch.cuda.is_available():
model = model.cuda()
fmodel = foolbox.models.PyTorchModel(
model,
bounds=(0, 1),
num_classes=len(CLASS_NAMES),
preprocessing=preprocessing,
)
rcParams["font.family"] = "monospace"
def img_to_np(img):
""" Transpose image to viewable format to plot/visualize. """
return np.transpose(img, (1, 2, 0))
attack = fa.GradientSignAttack(fmodel, distance=Linf)
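# FGSM takes a single step along the sign of the loss gradient:
#   x_adv = clip(x + eps * sign(grad_x L(x, y))),  here with eps = 4/255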
for image, label in dataset_loader:
plt.figure(figsize=(12, 5.4))
plt.subplot(1, 3, 1)
plt.imshow(img_to_np(image.squeeze()))
prob = fmodel.forward(image.numpy())
pred = np.argmax(prob, axis=-1)
plt.title(
"Original prediction:\n{}".format(CLASS_NAMES[pred[0]]), color="b",
)
adv = attack(image.numpy(), label.numpy(), epsilons=[4 / 255])
prob = fmodel.forward(adv)
pred = np.argmax(prob, axis=-1)
plt.subplot(1, 3, 2)
plt.imshow(img_to_np(adv.squeeze()))
plt.title(
"Adversary prediction:\n{}".format(CLASS_NAMES[pred[0]]), color="r",
)
# plt.subplot(1, 3, 3)
# plt.imshow(img_to_np((adv - image.numpy()).squeeze()))
# plt.title("Perturbation: $\\epsilon=4/255$")
resized_adv = cv2.resize(
np.moveaxis(adv.squeeze(), 0, 2),
(0, 0),
fx=0.5,
fy=0.5,
interpolation=cv2.INTER_LINEAR,
)
resized_adv = np.moveaxis(resized_adv, 2, 0)
resized_adv = | np.expand_dims(resized_adv, axis=1) | numpy.expand_dims |
import warnings
from functools import lru_cache
from typing import Optional
import numpy as np
import scipy.sparse as sp
from numba import njit
from tqdm import tqdm
from torch_geometric.data import Data
from graphwar import Surrogate
from graphwar.attack.targeted.targeted_attacker import TargetedAttacker
from graphwar.utils import singleton_filter, scipy_normalize, LikelihoodFilter
class Nettack(TargetedAttacker, Surrogate):
r"""Implementation of `Nettack` attack from the:
`"Adversarial Attacks on Neural Networks for Graph Data"
<https://arxiv.org/abs/1805.07984>`_ paper (KDD'18)
Parameters
----------
data : Data
PyG-like data denoting the input graph
device : str, optional
the device of the attack running on, by default "cpu"
seed : Optional[int], optional
the random seed for reproducing the attack, by default None
name : Optional[str], optional
name of the attacker, if None, it would be :obj:`__class__.__name__`,
by default None
kwargs : additional arguments of :class:`graphwar.attack.Attacker`,
Raises
------
TypeError
unexpected keyword argument in :obj:`kwargs`
Example
-------
>>> from graphwar.dataset import GraphWarDataset
>>> import torch_geometric.transforms as T
>>> dataset = GraphWarDataset(root='~/data/pygdata', name='cora',
transform=T.LargestConnectedComponents())
>>> data = dataset[0]
>>> surrogate_model = ... # train your surrogate model
>>> from graphwar.attack.targeted import Nettack
>>> attacker = Nettack(data)
>>> attacker.setup_surrogate(surrogate_model)
>>> attacker.reset()
>>> attacker.attack(target=1) # attacking target node `1` with default budget set as node degree
>>> attacker.reset()
>>> attacker.attack(target=1, num_budgets=1) # attacking target node `1` with budget set as 1
>>> attacker.data() # get attacked graph
>>> attacker.edge_flips() # get edge flips after attack
>>> attacker.added_edges() # get added edges after attack
>>> attacker.removed_edges() # get removed edges after attack
Note
----
* Please remember to call :meth:`reset` before each attack.
"""
# Nettack can conduct feature attack
_allow_feature_attack = True
_allow_singleton: bool = False
def __init__(self, data: Data, device: str = "cpu",
seed: Optional[int] = None, name: Optional[str] = None, **kwargs):
super().__init__(data=data, device=device, seed=seed, name=name, **kwargs)
feat = self.feat
self.scipy_feat = sp.csr_matrix(feat.cpu().numpy())
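        # binary feature co-occurrence counts (X^T X), used later to constrain feature flips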
self.cooc_matrix = sp.csr_matrix((feat.t() @ feat).cpu().numpy())
def setup_surrogate(self, surrogate):
Surrogate.setup_surrogate(self, surrogate=surrogate, freeze=True)
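        # collapse the surrogate's weight matrices into a single linear map, skipping
        # 1-D (bias) parameters, i.e. treat the surrogate as a linearized GCN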
W = None
for para in self.surrogate.parameters():
if para.ndim == 1:
warnings.warn(f"The surrogate model has `bias` term, which is ignored and the "
f"model itself may not be a perfect choice for {self.name}.")
continue
if W is None:
W = para
else:
W = para @ W
assert W is not None
self.W = W.t().cpu().numpy()
self.num_classes = self.W.shape[-1]
return self
def reset(self):
super().reset()
self.modified_adj = self.adjacency_matrix.copy()
self.modified_feat = self.scipy_feat.copy()
self.adj_norm = scipy_normalize(self.modified_adj)
self.cooc_constraint = None
return self
def compute_cooccurrence_constraint(self, nodes):
num_nodes = self.num_nodes
num_feats = self.num_feats
words_graph = self.cooc_matrix - \
sp.diags(self.cooc_matrix.diagonal(), format='csr')
words_graph.eliminate_zeros()
words_graph.data = words_graph.data > 0
word_degrees = words_graph.sum(0).A1
inv_word_degrees = np.reciprocal(word_degrees.astype(float) + 1e-8)
sd = np.zeros(num_nodes)
for n in range(num_nodes):
n_idx = self.modified_feat[n].nonzero()[1]
sd[n] = np.sum(inv_word_degrees[n_idx.tolist()])
scores_matrix = sp.lil_matrix((num_nodes, num_feats))
for n in nodes:
common_words = words_graph.multiply(self.modified_feat[n])
idegs = inv_word_degrees[common_words.nonzero()[1]]
nnz = common_words.nonzero()[0]
scores = np.array([idegs[nnz == ix].sum()
for ix in range(num_feats)])
scores_matrix[n] = scores
self.cooc_constraint = sp.csr_matrix(
scores_matrix - 0.5 * sd[:, None] > 0)
def gradient_wrt_x(self, label):
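        # gradient of the linearized surrogate's logit (A_hat^2 X W) w.r.t. the features:
        # the target's row of A_hat^2 combined with W's column for the given label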
return (self.adj_norm @ self.adj_norm)[self.target].T @ sp.coo_matrix(self.W[:, label].reshape(1, -1))
def compute_logits(self):
return (self.adj_norm @ self.adj_norm @ self.modified_feat @ self.W)[self.target].ravel()
def strongest_wrong_class(self, logits):
target_label_onehot = np.eye(self.num_classes)[self.target_label]
return (logits - 1000 * target_label_onehot).argmax()
def feature_scores(self):
if self.cooc_constraint is None:
self.compute_cooccurrence_constraint(self.influence_nodes)
logits = self.compute_logits()
best_wrong_class = self.strongest_wrong_class(logits)
gradient = self.gradient_wrt_x(
self.target_label) - self.gradient_wrt_x(best_wrong_class)
surrogate_loss = logits[self.target_label] - logits[best_wrong_class]
gradients_flipped = (gradient * -1).tolil()
gradients_flipped[self.modified_feat.nonzero()] *= -1
X_influencers = sp.lil_matrix(self.modified_feat.shape)
X_influencers[self.influence_nodes] = self.modified_feat[self.influence_nodes]
gradients_flipped = gradients_flipped.multiply(
(self.cooc_constraint + X_influencers) > 0)
nnz_ixs = np.array(gradients_flipped.nonzero()).T
sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
sorted_ixs = nnz_ixs[sorting]
grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]
scores = surrogate_loss - grads
return sorted_ixs[::-1], scores.A1[::-1]
def structure_score(self, a_hat_uv, XW):
logits = a_hat_uv @ XW
label_onehot = np.eye(self.num_classes)[self.target_label]
best_wrong_class_logits = (logits - 1000 * label_onehot).max(1)
logits_for_correct_class = logits[:, self.target_label]
struct_scores = logits_for_correct_class - best_wrong_class_logits
return struct_scores
@lru_cache(maxsize=1)
def compute_XW(self):
return self.modified_feat @ self.W
def get_attacker_nodes(self, n=5, add_additional_nodes=False):
        assert n < self.modified_adj.shape[0] - 1, \
            "number of influencers cannot be >= number of nodes in the graph!"
neighbors = self.modified_adj[self.target].indices
candidate_edges = np.column_stack(
(np.tile(self.target, len(neighbors)), neighbors)).astype("int32")
# The new A_hat_square_uv values that we would get if we removed the edge from u to each of the neighbors, respectively
a_hat_uv = self.compute_new_a_hat_uv(candidate_edges)
XW = self.compute_XW()
# compute the struct scores for all neighbors
struct_scores = self.structure_score(a_hat_uv, XW)
if len(neighbors) >= n: # do we have enough neighbors for the number of desired influencers?
influence_nodes = neighbors[np.argsort(struct_scores)[:n]]
if add_additional_nodes:
return influence_nodes, np.array([])
return influence_nodes
else:
influence_nodes = neighbors
if add_additional_nodes: # Add additional influencers by connecting them to u first.
# Compute the set of possible additional influencers, i.e. all nodes except the ones
# that are already connected to u.
poss_add_infl = np.setdiff1d(np.setdiff1d(
np.arange(self.modified_adj.shape[0]), neighbors), self.target)
n_possible_additional = len(poss_add_infl)
n_additional_attackers = n - len(neighbors)
                possible_edges = np.column_stack(
                    (np.tile(self.target, n_possible_additional), poss_add_infl)).astype("int32")
# %%
"""
Tests for the DDPG module
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import ddpg
def run_ddpg_gridworld(n_iters=1002, show=False):
"""
    Run an experiment with the DDPG agent in a toy grid-world environment.
"""
# define critic and actor networks
# example critic
state_in = keras.Input(shape=2)
action_in = keras.Input(shape=2)
goal_in = keras.Input(shape=2)
f_x = keras.layers.Concatenate(axis=-1)([
state_in,
action_in,
goal_in
])
f_x = keras.layers.Dense(50, activation='relu')(f_x)
f_x = keras.layers.Dense(50, activation='relu')(f_x)
f_x = keras.layers.Dense(50, activation='relu')(f_x)
f_x = keras.layers.Dense(50, activation='relu')(f_x)
f_x = keras.layers.Dense(50, activation='relu')(f_x)
f_x = keras.layers.Dense(50, activation='relu')(f_x)
f_x = keras.layers.Dense(30, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(10, activation='relu')(f_x)
f_x = keras.layers.Dense(1, activation='linear')(f_x)
critic = keras.Model(
inputs=[state_in, action_in, goal_in],
outputs=f_x
)
critic.compile(
optimizer=tf.keras.optimizers.Adam(lr=0.01),
loss='mse'
)
# example actor
state_in = keras.Input(shape=2)
goal_in = keras.Input(shape=2)
f_x = keras.layers.Concatenate(axis=-1)([
state_in,
goal_in
])
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(20, activation='relu')(f_x)
f_x = keras.layers.Dense(10, activation='relu')(f_x)
f_x = keras.layers.Dense(2, activation='linear')(f_x)
f_x = tf.keras.backend.l2_normalize(
f_x, axis=-1
)
actor = keras.Model(
inputs=[state_in, goal_in],
outputs=f_x
)
actor.compile(
tf.keras.optimizers.Adam(lr=0.002),
loss='mse'
)
agent = ddpg.ddpg.DDPGBase(
actor,
critic,
critic_epochs=1,
actor_epochs=1,
gamma=0.9,
tau=0.4
)
# define gridworld env
goal = np.array([0.5, 0.5])
def calculate_reward(next_states, goal):
next_states = next_states.reshape(-1, 2)
return (np.linalg.norm(
goal[None, :] - next_states,
axis=-1
        ).reshape(-1) < 0.2).astype(float)  # np.float was removed in NumPy 1.24+
def calculate_next_state(state, action):
return np.clip(
state + action/40,
0, 1
).reshape(-1)
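    # note on the toy dynamics above: calculate_reward returns 1.0 whenever the state is
    # within 0.2 of the goal, and calculate_next_state clips each step to the unit square,
    # so e.g. calculate_next_state(np.zeros(2), np.ones(2)) stays inside [0, 1]^2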
# training loop
states = []
actions = []
goals = []
rewards = []
next_states = []
performance_trajectory = []
n_state_grid = 50
state_grid = np.stack(np.meshgrid(
np.linspace(0, 1, n_state_grid),
np.linspace(0, 1, n_state_grid)
))[[1, 0]].reshape(2, n_state_grid*n_state_grid).T
for iteration in range(n_iters):
if len(states) > 1000:
states = states[-1000:]
actions = actions[-1000:]
goals = goals[-1000:]
rewards = rewards[-1000:]
next_states = next_states[-1000:]
if iteration % 50 == 1:
print('Iteration {}/{}'.format(iteration, n_iters))
state = np.random.rand(2)
action = 2*np.random.rand(2)-1
action = action / np.linalg.norm(action)
next_state = calculate_next_state(
state, action
)
reward = calculate_reward(
next_state,
goal
)[0]
states.append(state)
actions.append(action)
goals.append(goal)
rewards.append(reward)
next_states.append(next_state)
if iteration % 100 == 1:
for _ in range(50):
agent.train(
np.array(states),
np.array(actions),
np.array(goals),
np.array(rewards),
np.array(next_states)
)
if iteration % 200 == 1:
returns = []
if show:
plt.figure(figsize=(7, 7))
for _ in range(20):
test_state = np.random.rand(2)
test_rewards = []
traj = []
for __ in range(50):
traj.append(test_state.copy())
action = np.array(agent.actor([
test_state.reshape((1, 2)),
goal.reshape((1, 2))
]))
test_state = calculate_next_state(
test_state, action.reshape(-1)
)
test_rewards.append(
calculate_reward(
test_state,
goal
)[0]
)
                returns.append(np.sum(np.array(test_rewards)))
# -*- coding: utf-8 -*-
import numpy as np
import smuthi.coordinates as coord
import smuthi.field_expansion as fldex
import smuthi.vector_wave_functions as vwf
import smuthi.particles as part
import smuthi.particle_coupling as pc
import smuthi.scattered_field as sf
import smuthi.memoizing as memo
import warnings
import sys
from tqdm import tqdm
class InitialField:
"""Base class for initial field classes"""
def __init__(self, vacuum_wavelength):
self.vacuum_wavelength = vacuum_wavelength
def spherical_wave_expansion(self, particle, layer_system):
"""Virtual method to be overwritten."""
pass
def plane_wave_expansion(self, layer_system, i):
"""Virtual method to be overwritten."""
pass
def piecewise_field_expansion(self, layer_system):
"""Virtual method to be overwritten."""
pass
def angular_frequency(self):
"""Angular frequency.
Returns:
Angular frequency (float) according to the vacuum wavelength in units of c=1.
"""
return coord.angular_frequency(self.vacuum_wavelength)
class InitialPropagatingWave(InitialField):
"""Base class for plane waves and Gaussian beams
Args:
vacuum_wavelength (float):
polar_angle (float): polar propagation angle (0 means, parallel to z-axis)
azimuthal_angle (float): azimuthal propagation angle (0 means, in x-z plane)
polarization (int): 0 for TE/s, 1 for TM/p
amplitude (float or complex): Electric field amplitude
reference_point (list): Location where electric field of incoming wave equals amplitude
"""
def __init__(self, vacuum_wavelength, polar_angle, azimuthal_angle, polarization, amplitude=1,
reference_point=None):
assert (polarization == 0 or polarization == 1)
InitialField.__init__(self, vacuum_wavelength)
if np.isclose(np.cos(polar_angle), 0):
raise ValueError('propagating waves not defined in the xy-plane')
self.polar_angle = polar_angle
self.azimuthal_angle = azimuthal_angle
self.polarization = polarization
self.amplitude = amplitude
if reference_point:
self.reference_point = reference_point
else:
self.reference_point = [0, 0, 0]
def spherical_wave_expansion(self, particle, layer_system):
"""Regular spherical wave expansion of the wave including layer system response, at the locations of the
particles.
Args:
particle (smuthi.particles.Particle): particle relative to which the swe is computed
layer_system (smuthi.layer.LayerSystem): stratified medium
Returns:
regular smuthi.field_expansion.SphericalWaveExpansion object
"""
i = layer_system.layer_number(particle.position[2])
pwe_up, pwe_down = self.plane_wave_expansion(layer_system, i)
return (fldex.pwe_to_swe_conversion(pwe_up, particle.l_max, particle.m_max, particle.position)
+ fldex.pwe_to_swe_conversion(pwe_down, particle.l_max, particle.m_max, particle.position))
def piecewise_field_expansion(self, layer_system):
"""Compute a piecewise field expansion of the initial field.
Args:
layer_system (smuthi.layer.LayerSystem): stratified medium
Returns:
smuthi.field_expansion.PiecewiseWaveExpansion object
"""
pfe = fldex.PiecewiseFieldExpansion()
for i in range(layer_system.number_of_layers()):
pwe_up, pwe_down = self.plane_wave_expansion(layer_system, i)
pfe.expansion_list.append(pwe_up)
pfe.expansion_list.append(pwe_down)
return pfe
def electric_field(self, x, y, z, layer_system):
"""Evaluate the complex electric field corresponding to the wave.
Args:
x (array like): Array of x-values where to evaluate the field (length unit)
y (array like): Array of y-values where to evaluate the field (length unit)
z (array like): Array of z-values where to evaluate the field (length unit)
layer_system (smuthi.layer.LayerSystem): Stratified medium
Returns
Tuple (E_x, E_y, E_z) of electric field values
"""
pfe = self.piecewise_field_expansion(layer_system=layer_system)
return pfe.electric_field(x, y, z)
class GaussianBeam(InitialPropagatingWave):
"""Class for the representation of a Gaussian beam as initial field."""
def __init__(self, vacuum_wavelength, polar_angle, azimuthal_angle, polarization, beam_waist,
k_parallel_array='default', azimuthal_angles_array='default', amplitude=1, reference_point=None):
InitialPropagatingWave.__init__(self, vacuum_wavelength, polar_angle, azimuthal_angle, polarization, amplitude,
reference_point)
self.beam_waist = beam_waist
if type(k_parallel_array) == str and k_parallel_array == 'default':
k_parallel_array = coord.default_k_parallel
if type(azimuthal_angles_array) == str and azimuthal_angles_array == 'default':
azimuthal_angles_array = coord.default_azimuthal_angles
self.k_parallel_array = k_parallel_array
self.azimuthal_angles_array = azimuthal_angles_array
def plane_wave_expansion(self, layer_system, i, k_parallel_array=None, azimuthal_angles_array=None):
"""Plane wave expansion of the Gaussian beam.
Args:
layer_system (smuthi.layer.LayerSystem): stratified medium
i (int): layer number in which to evaluate the expansion
k_parallel_array (numpy.ndarray): in-plane wavenumber array for the expansion. if none specified,
self.k_parallel_array is used
azimuthal_angles_array (numpy.ndarray): azimuthal angles for the expansion. if none specified,
self.azimuthal_angles_array is used
Returns:
            tuple of two smuthi.field_expansion.PlaneWaveExpansion objects, one for the upgoing and one for the
            downgoing component
"""
if k_parallel_array is None:
k_parallel_array = self.k_parallel_array
if azimuthal_angles_array is None:
azimuthal_angles_array = self.azimuthal_angles_array
if np.cos(self.polar_angle) > 0:
iG = 0 # excitation layer number
kind = 'upgoing'
else:
iG = layer_system.number_of_layers() - 1
kind = 'downgoing'
niG = layer_system.refractive_indices[iG] # refractive index in excitation layer
if niG.imag:
warnings.warn('beam coming from absorbing medium')
k_iG = niG * self.angular_frequency()
z_iG = layer_system.reference_z(iG)
loz = layer_system.lower_zlimit(iG)
upz = layer_system.upper_zlimit(iG)
pwe_exc = fldex.PlaneWaveExpansion(k=k_iG, k_parallel=k_parallel_array, azimuthal_angles=azimuthal_angles_array,
kind=kind, reference_point=[0, 0, z_iG], lower_z=loz, upper_z=upz)
k_Gx = k_iG * np.sin(self.polar_angle) * np.cos(self.azimuthal_angle)
k_Gy = k_iG * np.sin(self.polar_angle) * np.sin(self.azimuthal_angle)
kp = pwe_exc.k_parallel_grid()
al = pwe_exc.azimuthal_angle_grid()
kx = kp * np.cos(al)
ky = kp * np.sin(al)
kz = pwe_exc.k_z_grid()
w = self.beam_waist
r_G = self.reference_point
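        # angular spectrum of the Gaussian beam: a Gaussian envelope in k-space centered
        # on the propagation direction (k_Gx, k_Gy), with a phase factor that places the
        # beam focus at the reference point r_G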
g = (self.amplitude * w**2 / (4 * np.pi) * np.exp(-w**2 / 4 * ((kx - k_Gx)**2 + (ky - k_Gy)**2))
* np.exp(-1j * (kx * r_G[0] + ky * r_G[1] + kz * (r_G[2] - z_iG))) )
pwe_exc.coefficients[0, :, :] = g * np.cos(al - self.azimuthal_angle + self.polarization * np.pi/2)
if np.cos(self.polar_angle) > 0:
pwe_exc.coefficients[1, :, :] = g * np.sin(al - self.azimuthal_angle + self.polarization * np.pi/2)
else:
pwe_exc.coefficients[1, :, :] = - g * np.sin(al - self.azimuthal_angle + self.polarization * np.pi/2)
pwe_up, pwe_down = layer_system.response(pwe_exc, from_layer=iG, to_layer=i)
if iG == i:
if kind == 'upgoing':
pwe_up = pwe_up + pwe_exc
elif kind == 'downgoing':
pwe_down = pwe_down + pwe_exc
return pwe_up, pwe_down
def propagated_far_field(self, layer_system):
"""Evaluate the far field intensity of the reflected / transmitted initial field.
Args:
layer_system (smuthi.layers.LayerSystem): Stratified medium
Returns:
A tuple of smuthi.field_expansion.FarField objects, one for forward (i.e., into the top hemisphere) and one
for backward propagation (bottom hemisphere).
"""
i_top = layer_system.number_of_layers() - 1
top_ff = fldex.pwe_to_ff_conversion(vacuum_wavelength=self.vacuum_wavelength,
plane_wave_expansion=self.plane_wave_expansion(layer_system, i_top)[0])
bottom_ff = fldex.pwe_to_ff_conversion(vacuum_wavelength=self.vacuum_wavelength,
plane_wave_expansion=self.plane_wave_expansion(layer_system, 0)[1])
return top_ff, bottom_ff
def initial_intensity(self, layer_system):
"""Evaluate the incoming intensity of the initial field.
Args:
layer_system (smuthi.layers.LayerSystem): Stratified medium
Returns:
A smuthi.field_expansion.FarField object holding the initial intensity information.
"""
if np.cos(self.polar_angle) > 0: # bottom illumination
ff = fldex.pwe_to_ff_conversion(vacuum_wavelength=self.vacuum_wavelength,
plane_wave_expansion=self.plane_wave_expansion(layer_system, 0)[0])
else: # top illumination
i_top = layer_system.number_of_layers() - 1
ff = fldex.pwe_to_ff_conversion(vacuum_wavelength=self.vacuum_wavelength,
plane_wave_expansion=self.plane_wave_expansion(layer_system, i_top)[1])
return ff
class PlaneWave(InitialPropagatingWave):
"""Class for the representation of a plane wave as initial field.
Args:
vacuum_wavelength (float):
polar_angle (float): polar angle of k-vector (0 means, k is parallel to z-axis)
azimuthal_angle (float): azimuthal angle of k-vector (0 means, k is in x-z plane)
polarization (int): 0 for TE/s, 1 for TM/p
amplitude (float or complex): Plane wave amplitude at reference point
reference_point (list): Location where electric field of incoming wave equals amplitude
"""
def plane_wave_expansion(self, layer_system, i):
"""Plane wave expansion for the plane wave including its layer system response. As it already is a plane wave,
the plane wave expansion is somehow trivial (containing only one partial wave, i.e., a discrete plane wave
expansion).
Args:
layer_system (smuthi.layers.LayerSystem): Layer system object
i (int): layer number in which the plane wave expansion is valid
Returns:
Tuple of smuthi.field_expansion.PlaneWaveExpansion objects. The first element is an upgoing PWE, whereas the
second element is a downgoing PWE.
"""
if np.cos(self.polar_angle) > 0:
iP = 0
kind = 'upgoing'
else:
iP = layer_system.number_of_layers() - 1
kind = 'downgoing'
niP = layer_system.refractive_indices[iP]
neff = np.sin([self.polar_angle]) * niP
alpha = np.array([self.azimuthal_angle])
angular_frequency = coord.angular_frequency(self.vacuum_wavelength)
k_iP = niP * angular_frequency
k_Px = k_iP * np.sin(self.polar_angle) * np.cos(self.azimuthal_angle)
k_Py = k_iP * np.sin(self.polar_angle) * np.sin(self.azimuthal_angle)
k_Pz = k_iP * np.cos(self.polar_angle)
z_iP = layer_system.reference_z(iP)
amplitude_iP = self.amplitude * np.exp(-1j * (k_Px * self.reference_point[0] + k_Py * self.reference_point[1]
+ k_Pz * (self.reference_point[2] - z_iP)))
loz = layer_system.lower_zlimit(iP)
upz = layer_system.upper_zlimit(iP)
pwe_exc = fldex.PlaneWaveExpansion(k=k_iP, k_parallel=neff*angular_frequency, azimuthal_angles=alpha, kind=kind,
reference_point=[0, 0, z_iP], lower_z=loz, upper_z=upz)
pwe_exc.coefficients[self.polarization, 0, 0] = amplitude_iP
pwe_up, pwe_down = layer_system.response(pwe_exc, from_layer=iP, to_layer=i)
if iP == i:
if kind == 'upgoing':
pwe_up = pwe_up + pwe_exc
elif kind == 'downgoing':
pwe_down = pwe_down + pwe_exc
return pwe_up, pwe_down
class DipoleSource(InitialField):
"""Class for the representation of a single point dipole source.
Args:
vacuum_wavelength (float): vacuum wavelength (length units)
dipole_moment (list or tuple): (x, y, z)-coordinates of dipole moment vector
position (list or tuple): (x, y, z)-coordinates of dipole position
k_parallel (numpy.ndarray or str): In-plane wavenumber.
If 'default', use smuthi.coordinates.default_k_parallel
azimuthal_angles (numpy.ndarray or str): Azimuthal angles for plane wave expansions
If 'default', use smuthi.coordinates.default_azimuthal_angles
"""
def __init__(self, vacuum_wavelength, dipole_moment, position, k_parallel='default', azimuthal_angles='default'):
InitialField.__init__(self, vacuum_wavelength)
self.dipole_moment = dipole_moment
self.position = position
self.k_parallel = k_parallel
self.azimuthal_angles = azimuthal_angles
def current(self):
r"""The current density takes the form
.. math::
\mathbf{j}(\mathbf{r}) = \delta(\mathbf{r} - \mathbf{r}_D) \mathbf{j}_D,
where :math:`\mathbf{j}_D = -j \omega \mathbf{\mu}`, :math:`\mathbf{r}_D` is the location of the dipole, :math:`\omega`
is the angular frequency and :math:`\mathbf{\mu}` is the dipole moment.
For further details, see 'Principles of nano optics' by Novotny and Hecht.
Returns:
List of [x, y, z]-components of current density vector :math:`\mathbf{j}_D`
"""
return [- 1j * self.angular_frequency() * self.dipole_moment[i] for i in range(3)]
def outgoing_spherical_wave_expansion(self, layer_system):
"""The dipole field as an expansion in spherical vector wave functions.
Args:
layer_system (smuthi.layers.LayerSystem): stratified medium
Returns:
outgoing smuthi.field_expansion.SphericalWaveExpansion object
"""
laynum = layer_system.layer_number(self.position[2])
k = layer_system.refractive_indices[laynum] * self.angular_frequency()
swe_out = fldex.SphericalWaveExpansion(k=k, l_max=1, m_max=1, kind='outgoing', reference_point=self.position,
lower_z=layer_system.lower_zlimit(laynum),
upper_z=layer_system.upper_zlimit(laynum))
l = 1
for tau in range(2):
for m in range(-1, 2):
ex, ey, ez = vwf.spherical_vector_wave_function(0, 0, 0, k, 1, tau, l, -m)
b = 1j * k / np.pi * 1j * self.angular_frequency() * (ex * self.current()[0] + ey * self.current()[1]
+ ez * self.current()[2])
swe_out.coefficients[fldex.multi_to_single_index(tau, l, m, 1, 1)] = b
return swe_out
def spherical_wave_expansion(self, particle, layer_system):
"""Regular spherical wave expansion of the wave including layer system response, at the locations of the
particles.
Args:
particle (smuthi.particles.Particle): particle relative to which the swe is computed
layer_system (smuthi.layer.LayerSystem): stratified medium
Returns:
regular smuthi.field_expansion.SphericalWaveExpansion object
"""
virtual_particle = part.Particle(position=self.position, l_max=1, m_max=1)
wd = pc.direct_coupling_block(vacuum_wavelength=self.vacuum_wavelength, receiving_particle=particle,
emitting_particle=virtual_particle, layer_system=layer_system)
wr = pc.layer_mediated_coupling_block(vacuum_wavelength=self.vacuum_wavelength, receiving_particle=particle,
emitting_particle=virtual_particle, layer_system=layer_system,
k_parallel=self.k_parallel)
k = self.angular_frequency() * layer_system.refractive_indices[layer_system.layer_number(particle.position[2])]
swe = fldex.SphericalWaveExpansion(k=k, l_max=particle.l_max, m_max=particle.m_max, kind='regular',
reference_point=particle.position)
swe.coefficients = np.dot(wd + wr, self.outgoing_spherical_wave_expansion(layer_system).coefficients)
return swe
def piecewise_field_expansion(self, layer_system, include_direct_field=True, include_layer_response=True):
"""Compute a piecewise field expansion of the dipole field.
Args:
layer_system (smuthi.layer.LayerSystem): stratified medium
include_direct_field (bool): if True (default), the direct dipole field is included.
otherwise, only the layer response of the dipole field is
returned.
include_layer_response (bool): if True (default), the layer response of the dipole field is
included. otherwise, only the direct dipole field is
returned.
Returns:
            smuthi.field_expansion.PiecewiseFieldExpansion object
"""
pfe = fldex.PiecewiseFieldExpansion()
if include_direct_field:
pfe.expansion_list.append(self.outgoing_spherical_wave_expansion(layer_system))
if include_layer_response:
for i in range(layer_system.number_of_layers()):
# layer response as plane wave expansions
pwe_up, pwe_down = fldex.swe_to_pwe_conversion(swe=self.outgoing_spherical_wave_expansion(layer_system),
k_parallel=self.k_parallel,
azimuthal_angles=self.azimuthal_angles,
layer_system=layer_system, layer_number=i,
layer_system_mediated=True)
if i > 0:
pfe.expansion_list.append(pwe_up)
if i < layer_system.number_of_layers() - 1:
pfe.expansion_list.append(pwe_down)
return pfe
def electric_field(self, x, y, z, layer_system, include_direct_field=True, include_layer_response=True):
"""Evaluate the complex electric field of the dipole source.
Args:
x (array like): Array of x-values where to evaluate the field (length unit)
y (array like): Array of y-values where to evaluate the field (length unit)
z (array like): Array of z-values where to evaluate the field (length unit)
layer_system (smuthi.layer.LayerSystem): Stratified medium
include_direct_field (bool): if True (default), the direct dipole field is included.
otherwise, only the layer response of the dipole field is
returned.
include_layer_response (bool): if True (default), the layer response of the dipole field is
included. otherwise, only the direct dipole field is
returned.
Returns
Tuple (E_x, E_y, E_z) of electric field values
"""
pfe = self.piecewise_field_expansion(layer_system=layer_system, include_direct_field=include_direct_field,
include_layer_response=include_layer_response)
return pfe.electric_field(x, y, z)
def dissipated_power_homogeneous_background(self, layer_system):
r"""Compute the power that the dipole would radiate in an infinite homogeneous medium of the same refractive
index as the layer that contains the dipole.
.. math::
            P_0 = \frac{|\mathbf{\mu}|^2 k \omega^3}{12 \pi}
Args:
layer_system (smuthi.layers.LayerSystem): stratified medium
Returns:
power (float)
"""
laynum = layer_system.layer_number(self.position[2])
k = layer_system.refractive_indices[laynum] * self.angular_frequency()
mu2 = abs(self.dipole_moment[0])**2 + abs(self.dipole_moment[1])**2 + abs(self.dipole_moment[2])**2
p = mu2 * k * self.angular_frequency()**3 / (12 * np.pi)
return p
def check_dissipated_power_homogeneous_background(self, layer_system):
laynum = layer_system.layer_number(self.position[2])
e_x_in, e_y_in, e_z_in = self.electric_field(x=self.position[0]+10, y=self.position[1]+10, z=self.position[2]+10,
layer_system=layer_system, include_direct_field=True)
k = layer_system.refractive_indices[laynum] * self.angular_frequency()
        # P = omega / 2 * Im(conj(mu) . E)
        p = self.angular_frequency() / 2 * (np.conjugate(self.dipole_moment[0]) * e_x_in
                                            + np.conjugate(self.dipole_moment[1]) * e_y_in
                                            + np.conjugate(self.dipole_moment[2]) * e_z_in).imag
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
    print('Waiting for OOINet to process and prepare the data request; this may take up to 20 minutes.')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # minutes waited so far; computed up front so the success branch can report it
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
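# Illustrative usage (hypothetical time window; the dataset path matches the CE02SHSM
# METBK entry constructed in M2M_URLs below):
#     data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                     '2019-01-01T00:00:00.000Z', '2019-01-08T00:00:00.000Z')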
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist, variables):
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist may eventually contain more than one URL
for jj in range(len(nclist)):
url=nclist[jj]
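        # re-root the catalog URL at the DAP endpoint by stripping its leading host portion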
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
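# Illustrative usage sketch (not from the original source): structtype grows on demand,
# creating a fresh `var` whenever the next unused index is requested:
#     vl = structtype()
#     vl[0].name, vl[0].units = 'time', 'seconds since 1900-01-01'
#     vl[1].name = 'sea_surface_temperature'  # allowed because len(vl) == 1 here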
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
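# Acoustic Doppler Current Profilers: earth-referenced velocity profiles
# (eastward/northward/upward seawater velocity) plus instrument attitude
# (heading/pitch/roll) and the depth of each velocity bin.
# Example selection: platform_name='CE02SHSM', node='NSIF',
# instrument_class='ADCP', method='Telemetered' maps to the stream below.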
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
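# Bio-acoustic sonar (zooplankton sonar): only the time base is mapped for
# these streams; no science variables are requested here.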
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
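# Surface wave statistics from the buoy-mounted wave sensor: bulk wave
# heights, periods (zero-crossing, spectral, and peak), and mean
# direction/spread.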
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
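# Single-point velocity meters on the buoys and NSIFs: eastward/northward/
# upward velocity plus raw attitude (deci-degrees), temperature
# (0.01 degC), and pressure readings.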
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
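# Water-side pCO2 sensors: thermistor temperature and the partial pressure
# of CO2 in seawater.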
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
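# Seawater pH sensors: thermistor temperature and a unitless pH value.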
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
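# Spectral irradiance radiometers: downwelling irradiance returned as a
# single multi-channel vector variable (uW cm-2 nm-1).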
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
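# Seafloor pressure (tide) sensors on the MFNs: absolute seafloor pressure
# and seawater temperature.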
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
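# Pumped CTDs: the same six-variable layout (time, temperature, salinity,
# density, pressure, conductivity) is reused for every mooring/node pair;
# only the reference designator in uframe_dataset_name changes.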
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
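# 3-D point velocimeters on the MFNs: turbulent velocity components plus
# the instrument pressure record.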
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
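# 3-D velocimeter on the CE09OSPM wire-following profiler; pressure is
# taken from the co-located profiler CTD (int_ctd_pressure).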
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
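# PCO2A: buoy-mounted air-sea pCO2 system. Each branch returns the partial pressure
# of CO2 in surface seawater and in the overlying air plus the derived air-sea CO2
# flux; e.g. a request with (platform_name='CE02SHSM', node='BUOY',
# instrument_class='PCO2A', method='Telemetered') resolves to the SBD12 stream below.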
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
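# PARAD-K: photosynthetically active radiation (PAR) from the wire-following
# profiler, paired with the profiler CTD pressure so PAR can be binned by depth.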
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
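# OPTAA: spectral absorption/attenuation meter (AC-S). Only the time coordinate is
# pre-declared here, presumably because the multi-wavelength a/c channels are
# spectral arrays that do not fit this flat var_list pattern.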
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
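# NUTNR: SUNA nitrate sensor on the near-surface instrument frames, returning raw
# and salinity-corrected nitrate in umol/L. Note that the telemetered stream is
# named 'suna_dcl_recovered' in uFrame even for the Telemetered method.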
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
#MOPAK
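# MOPAK: buoy 3-axis motion package. As with OPTAA, only the time record is
# pre-declared; the accelerometer/rate channels can be read from the downloaded
# files directly.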
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
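# METBK: bulk meteorology package on the surface buoys. Each branch requests the
# near-surface ocean state (SST, conductivity, salinity, surface currents), the
# surface atmosphere (winds, pressure, air temperature, humidity, radiation,
# precipitation), and the METBK-derived minute-averaged flux products (met_*_minute).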
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = 'percent'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
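# FLORT: three-channel fluorometer (chlorophyll-a, CDOM, optical backscatter) on
# the NSIF of each coastal mooring and additionally on the CE01ISSM/CE06ISSM buoys.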
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
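# FDCHP: direct-covariance air-sea flux package on the CE02SHSM buoy; only the time
# coordinate is declared, matching the OPTAA/MOPAK pattern above.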
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
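# DOSTA: dissolved-oxygen optode. The NSIF branches return both the corrected L2
# 'dissolved_oxygen' (umol/kg) and the raw optode concentration and temperature;
# the seafloor (MFN) branches return a reduced two-variable set.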
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
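# ADCP: earth-referenced velocity profiles. 'bin_depths' holds the depth of every
# velocity bin, so the profile variables come back as 2-D (time x bin) arrays even
# though the placeholders below are initialized as empty 1-D arrays.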
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
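# WAVSS: directional wave statistics from the surface buoys (wave heights, periods,
# mean direction, and directional spread as reported by the sensor).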
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
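# VELPT: single-point acoustic current meter. These recovered-host streams carry
# engineering-unit ancillary fields, hence the deci-degree attitude, 0.01 degC
# temperature, and 0.001 dbar pressure units below.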
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W - seawater pCO2
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - downwelling spectral irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - seafloor pressure (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - conductivity, temperature, depth (DCL host records)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D point turbulent velocity
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - air-sea pCO2
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - optical absorption and attenuation (only the time record is defined here)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - nitrate concentration (SUNA)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
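#CTDBP-RecoveredInst
# Instrument-recovered CTDBP streams use the ctdbp_seawater_* parameter names
# in place of the temp/pressure/conductivity names of the DCL host records above.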
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
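#CTDPF-WFP
# Wire-following profiler CTD (recovered_wfp) on the CE09OSPM profiler mooring;
# parameter names switch to the ctdpf_ckl_* designators.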
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
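#ADCP - acoustic Doppler current profiler (recovered instrument, earth-coordinate velocities)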
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
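# ZPLSC bio-acoustic sonars on the seafloor multi-function nodes (MFN); the
# echogram streams below only map the time coordinate.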
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
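# VELPT single-point velocity meters on the surface buoys (recovered instrument data).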
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
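# VELPT single-point velocity meters on the near-surface instrument frames (NSIF).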
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
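# VEL3D 3-D point velocity meters, starting with the wire-following profiler (WFP)
# on the coastal offshore profiler mooring.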
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
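# VEL3D turbulent point velocities on the multi-function nodes; note the pressure
# variable here is reported in 0.001 dbar (i.e. mbar).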
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
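# PRESF seafloor pressure (tide) measurements on the multi-function nodes.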
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
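# PHSEN seawater pH sensors, first on the NSIF and then on the MFN.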
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
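# PCO2W sensors: partial pressure of CO2 in seawater (uatm) on the NSIF and MFN.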
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
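# PARAD photosynthetically available radiation (PAR) on the profiler mooring's WFP.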
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
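# NUTNR nitrate concentrations from the SUNA instruments on the NSIF.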
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
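# FDCHP direct-covariance air-sea flux package on the CE02SHSM buoy; only the
# time coordinate is mapped here.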
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
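# FLORT three-channel fluorometers (chlorophyll-a, CDOM, optical backscatter).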
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
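# Dissolved oxygen: the profiler's DOFST sensor reports its raw output as a
# frequency (Hz) alongside the derived umol/kg value; the fixed nodes report
# optode/CTD-derived oxygen.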
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
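# Wave statistics (WAVSS_Stats) derived from the ADCPT-M instruments on the
# inshore multi-function nodes.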
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
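# Cabled benthic experiment packages (CE02SHBP, CE04OSBP): all streams below
# use the 'streamed' delivery method.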
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
# CSPP (Coastal Surface-Piercing Profiler) datasets below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
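# CTDPF (profiler CTD) branches: temperature, practical salinity (reported as
# 'unitless'), density, pressure, and conductivity, for both the telemetered
# and recovered_cspp streams.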
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
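# PARAD branches: photosynthetically available radiation (PAR). Only the PAR
# output and 'int_ctd_pressure' (presumably the interpolated CTD pressure)
# are mapped.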
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
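# NUTNR (nitrate) branches. Only recovered_cspp streams are mapped in this
# block; no telemetered NUTNR method is handled for these profilers here.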
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
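# SPKIR (spectral irradiance) branches. Note that the downwelling vector is a
# multi-wavelength quantity, so this variable is array-valued per sample.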
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
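# VELPT (single-point velocity meter) branches: eastward/northward/upward
# velocities plus instrument attitude (heading, roll, pitch), temperature,
# and pressure.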
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
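# OPTAA (optical absorption/attenuation) branches. Only time and CTD pressure
# are mapped here; the absorption and attenuation spectra themselves are not
# requested through this lookup.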
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
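# Shelf surface-piercing profilers (CE02SHSP, CE07SHSP). The same instrument
# classes repeat, but only recovered_cspp methods are defined in this block,
# and the port numbers differ from the inshore profilers (e.g. 07-FLORTJ000
# here versus 08-FLORTJ000 on CE01ISSP/CE06ISSP).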
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
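# Coastal glider (CE05MOAS) CTD branches. Glider streams add 'lat' and 'lon'
# so each sample can be georeferenced along the flight path. For example,
# platform_name='CEGL386' with method='Telemetered' resolves to
# 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'.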
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
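# Glider DOSTA branches: oxygen optodes on the CE05MOAS gliders, reporting
# 'sci_oxy4_oxygen' (umol/L) and 'sci_abs_oxygen' (umol/kg) together with
# pressure and position.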
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
        var_list[8].data = np.array([])
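# Editor's note: the elif chain above maps each (platform_name, node,
# instrument_class, method) combination to a uframe dataset path plus fixed
# per-variable metadata. A minimal table-driven sketch of the same idea is
# shown below; GLIDER_DOSTA_VARS and configure_glider_dosta are illustrative
# names assumed for this sketch, not part of the original source.
GLIDER_DOSTA_VARS = [
    ('time', 'seconds since 1900-01-01'),
    ('sci_oxy4_oxygen', 'umol/L'),
    ('sci_abs_oxygen', 'umol/kg'),
    ('int_ctd_pressure', 'dbar'),
    ('lat', 'degree_north'),
    ('lon', 'degree_east'),
]

def configure_glider_dosta(var_list, glider, method):
    # Derive the stream suffix from the retrieval method.
    stream = ('telemetered/dosta_abcdjm_glider_instrument'
              if method == 'Telemetered'
              else 'recovered_host/dosta_abcdjm_glider_recovered')
    uframe_dataset_name = 'CE05MOAS/{}/04-DOSTAM000/{}'.format(glider, stream)
    for i, (var_name, units) in enumerate(GLIDER_DOSTA_VARS):
        var_list[i].name = var_name
        var_list[i].data = np.array([])
        var_list[i].units = units
    return uframe_dataset_name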
"""
With the **track** module, you can create, import, edit, save, and
export tracks.
"""
from pathlib import Path
import numpy as np
from PIL import Image
import logging
from line_track_designer.printer import Printer
from line_track_designer.tile import Tile, Tiles
from line_track_designer.error import LineTrackDesignerError
from line_track_designer.markdown import Markdown
class Track:
"""
Representation of a track.
An instance of the **Track** class is composed of three fields:
* **tiles**: array which contains the number of each tile of the track
* **orient**: array which indicates the orientation of each tile
* **name**: name of the track
"""
@staticmethod
def read(file, name='track'):
"""
Read a text file representing a track
and return the track associated.
Args:
file (str): filename
name (str): name of the track
Returns:
Track: the track associated to the file
Raises:
LineTrackDesignerError: file not found
LineTrackDesignerError: bad filename extension: requires .txt
"""
try:
f = open(file, 'r')
except IOError:
raise LineTrackDesignerError('file {} not found'.format(file))
p = Path(file)
if p.suffix != '.txt':
raise LineTrackDesignerError(
'bad filename extension: requires .txt')
lines = f.readlines()
f.close()
tiles, orient = [], []
for line in lines:
line = line.strip('\n').split(' ')
lt, lo = [], []
for i in line:
t, o = i.split(';')
lt.append(int(t))
lo.append(int(o))
tiles.append(lt)
orient.append(lo)
logging.info('Reading track: {}'.format(file))
return Track(
np.array(tiles, dtype=int),
np.array(orient, dtype=int), name)
@staticmethod
def zeros(nrow, ncol, name='track'):
"""
Create an empty track.
Args:
nrow (int): number of rows
ncol (int): number of columns
name (str): name of the track
Returns:
Track: empty track (only zeros)
"""
tiles = np.zeros((nrow, ncol), dtype=int)
orient = np.zeros((nrow, ncol), dtype=int)
return Track(tiles, orient, name)
@staticmethod
def max_shape(width, height):
"""
Return the maximum number of rows and columns
of a track limited by a width and a height in mm.
Args:
width (int): width in mm
height (int): height in mm
Returns:
tuple of int: number of rows and columns
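        For example, if ``Tile.SIDE`` were 200 mm, ``max_shape(1000, 600)``
        would return ``(5, 3)``.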
"""
return width // Tile.SIDE, height // Tile.SIDE
def __init__(self, tiles, orient, name='track'):
"""
Init a track. The arguments tiles and orient must be numpy arrays.
For example:
.. code-block:: python
import numpy as np
from line_track_designer.track import Track
tiles = np.array([
[3, 2, 3],
[2, 11, 2],
[3, 2, 3]
])
orient = np.array([
[1, 1, 0],
[0, 0, 0],
[2, 1, 3]
])
track = Track(tiles, orient, 'my track')
Args:
tiles (numpy.array): array of tiles
orient (numpy.array): array of orientations
name (str): name of the track
Raises:
LineTrackDesignerError: tiles and orient must have the same shape
LineTrackDesignerError: invalid values
"""
if (tiles.shape != orient.shape):
raise LineTrackDesignerError(
'tiles and orient must have the same shape')
nrow, ncol = tiles.shape
for i in range(nrow):
for j in range(ncol):
t, o = tiles[i][j], orient[i][j]
if t != 0 and not Tile.is_valid(t):
raise LineTrackDesignerError(
'{} is not a valid tile value'.format(t))
if not 0 <= o <= 3:
raise LineTrackDesignerError(
'{} is not a valid orient value'.format(o))
self._name = name
self._tiles = tiles.copy()
self._orient = orient.copy()
logging.info('Track created')
@property
def tiles(self):
"""Get the array of tiles."""
return self._tiles
@property
def orient(self):
"""Get the array of orientations."""
return self._orient
@property
def name(self):
"""Get the name of the track."""
return self._name
def __str__(self):
"""
Make the string format of the track.
The tiles and orient matrix are superposed in one matrix.
Each couple of values is separated by a semicolon.
With the last example, we obtain:
.. code-block:: text
3;1 2;1 3;0
2;0 11;0 2;0
3;2 2;1 3;3
"""
lines = []
for line_t, line_o in zip(self.tiles, self.orient):
line = ' '.join(
['{};{}'.format(t, o) for t, o in zip(line_t, line_o)])
lines.append(line)
return '\n'.join(lines)
def __repr__(self):
"""
Make the repr format of the track.
        It's the same as the string format.
"""
return str(self)
def add_col(self):
"""
Add a column to the track. This column is filled with 0.
"""
new_col = np.zeros(self.tiles.shape[0], dtype=int)
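        # Reshape the flat column to (nrow, 1) so it can be stacked onto the
        # right-hand side of the tile grid.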
new_col = np.atleast_2d(new_col).T
        self._tiles = np.hstack([self.tiles, new_col])
import numpy as np
from sysopt import Signature, Block
from sysopt.blocks import InputOutput
# Variable
# - name
# - bounds[optional]
# SIVariable(Variable)
# - units
# Signature
# - states
# - constraints
# - inputs
# - outputs
# - parameters
# Metadata
# - states: List[Union[str, Variable]]
# - constraints
# - inputs
# ----------------Test Fixtures---------------------
class BlockMock(Block):
def __init__(self):
super().__init__(
Signature(inputs=2, outputs=2)
)
def compute_outputs(self, t, x, z, inputs, parameters):
return t * inputs
def mock_block_factory():
sig = Signature(inputs=2, outputs=2)
def g(t, u, _):
return t * u
return InputOutput(sig, g)
def test_block_oop_api():
block = BlockMock()
assert block.signature == Signature(
inputs=2, outputs=2, parameters=0, states=0, constraints=0
)
assert len(block.inputs) == 2
assert len(block.outputs) == 2
def test_block_factory():
block = mock_block_factory()
arg = np.array([1, 3], dtype=float)
T = 2
    truth = np.array([2, 6], dtype=float)
import numpy as np
import cv2
img = cv2.imread(r'C:\Users\Saptarshi\Pictures\cont1.jpg',1)
#resize image
img = cv2.resize(img,(300,400))
#convert image to grayscale
grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#blurr image to smooth
blurr = cv2.GaussianBlur(grey, (5,5),0)
#finding edges
edge = cv2.Canny(blurr, 0, 50)
#adaptive threshold and canny gave similar final output
#threshold = cv2.adaptiveThreshold(blurr ,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,2)
#find contours in thresholded image and sort them according to decreasing area
_, contours, hierarchy = cv2.findContours(edge.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
#contour approximation
for i in contours:
elip = cv2.arcLength(i, True)
approx = cv2.approxPolyDP(i,0.08*elip, True)
if len(approx) == 4 :
doc = approx
break
#draw contours
cv2.drawContours(img, [doc], -1, (0, 255, 0), 2)
#reshape to avoid errors ahead
doc=doc.reshape((4,2))
#create a new array and initialize
new_doc = np.zeros((4,2), dtype="float32")
Sum = doc.sum(axis = 1)
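#the corner with the smallest x+y sum is top-left, the largest bottom-right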
new_doc[0] = doc[np.argmin(Sum)]
new_doc[2] = doc[np.argmax(Sum)]
Diff = np.diff(doc, axis=1)
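#for the y-x difference, the minimum marks top-right and the maximum bottom-left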
new_doc[1] = doc[np.argmin(Diff)]
new_doc[3] = doc[np.argmax(Diff)]
(tl,tr,br,bl) = new_doc
#find distance between points and get max
dist1 = np.linalg.norm(br-bl)
dist2 = np.linalg.norm(tr-tl)
import numpy as np
from numpy.random import rand
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
import glob as glob
import pandas as pd
import json
from scipy.interpolate import interp1d
from scipy import interp
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from skimage import exposure,img_as_ubyte
from moviepy.editor import VideoClip
from moviepy.editor import ImageSequenceClip
from skimage import color
import datetime
import time
import argparse
import os
import h5py
def read_files(args):
'''read in, sort by time and output numpy arrays containing each image frame in the simulation'''
#check if hdf5 file exists
for fname in os.listdir(args.fName):
if fname.endswith('h5'):
if fname == 'dsetf.h5':
print('h5 file found')
                hf = h5py.File(args.fName+'/'+fname, 'r')
fN = pd.DataFrame(list(hf.keys()))
print(len(list(hf.keys())))
# dfN = pd.DataFrame(glob.glob(args.fName+'/defect*.dat'))
params = np.loadtxt('params.txt')
# print(fN)
fN['time'] = fN[0].str.extract(r'(\d*\.?\d*)\.dat').astype('float')
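                # e.g. 'out12.5.dat' yields 12.5, so frames sort numerically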
fN.sort_values(by=['time'],inplace=True)
# dfN['time'] = dfN[0].str.extract(r'(\d*\.?\d*)\.dat').astype('float')
# dfN.sort_values(by=['time'],inplace=True)
#Sort fileNames by number
imSeq = [np.array(hf.get(f)) for f in fN[0]]
break
# dimSeq = [np.loadtxt(f) for f in dfN[0]]
else:
fileNames = glob.glob(args.fName+'/out*.dat')
# dfileNames = glob.glob(args.fName+'/defect*.dat')
fN = pd.DataFrame(fileNames)
fN['time'] = fN[0].str.extract(r'(\d*\.?\d*)\.dat').astype('float')
fN.sort_values(by=['time'],inplace=True)
# dfN = pd.DataFrame(dfileNames)
# dfN['time'] = dfN[0].str.extract(r'(\d*\.?\d*)\.dat').astype('float')
# dfN.sort_values(by=['time'],inplace=True)
#Sort fileNames by number
imSeq = [np.loadtxt(f) for f in fN[0]]
# dimSeq = [np.loadtxt(f) for f in dfN[0]]
# return [imSeq,dimSeq]
return [imSeq, fN]
parser = argparse.ArgumentParser(description='Make some movies')
parser.add_argument('fName',type=str,help='directory name for data')
args = parser.parse_args()
imSeq, fN = read_files(args)
fps=24
secondsPframe_in = fN['time'].diff().mean()
fps_in = secondsPframe_in**-1
params = np.loadtxt('param.txt')
import pandas as pd
class DataSet():
'''
Maintains a data set given simple arrays or lists of values, and
provides basic statistics, including covariance and correlation
matrices.
'''
def __init__(self, data, labels=None):
'''
Class constructor
Args:
data - 2 dimensional Python array (list). List of lists of
values.
labels - list of column headings. Optional.
        Side effects:
            populates self.data_set, a dictionary of the data set in the
            form of {column-name: [values]}
Exceptions:
ValueError if input is invalid
'''
# Validate user input. Should be list of at least 2 lists.
try:
            if len(data) < 2:
raise ValueError('Input should be two dimensional list')
except TypeError:
raise ValueError('Input should be two dimensional list')
first_length = len(data[0])
for array in data[1:]:
if len(array) != first_length:
raise ValueError('Input arrays should be of same length')
        # If labels is None, create default labels of the form 'Column<i>'
        if labels is None:
labels = ['Column' + str(n) for n in range(len(data))]
# Populate dictionary of data lists by column name
self.data_set = {}
for i in range(len(data)):
self.data_set[labels[i]] = data[i]
####### Private helper functions
def __mvs(self, data):
'''
Calculate mean, variance, and standard deviation for an array.
Args:
data - an 'array' (list) of values
Returns:
dictionary with mean, variation, and standard deviation
'''
# size of array and mean
n = len(data)
mean = sum(data) / n
# variance and standard deviation
sq_differences = [(value - mean)**2 for value in data]
variance = sum(sq_differences) / (n - 1)
std = variance ** 0.5
# package up the results
statistics = {'mean': mean, 'variance': variance,
'standard_deviation': std}
return statistics
def __covar(self, colA, colB):
'''
Calculate covariance for two arrays.
Args:
ColA - an 'array' (list) of values
ColB - an 'array' (list) of values
Returns:
Covariance
'''
assert len(colA) == len(colB), \
f'Arrays are of unequal length ({len(colA), len(colB)}'
meanA = self.__mvs(colA)['mean']
meanB = self.__mvs(colB)['mean']
prod = [(colA[x] - meanA) * (colB[x] - meanB)
for x in range(len(colA))]
return sum(prod) / (len(colA) - 1)
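    # Example: __covar([1, 2, 3], [2, 4, 6]) -> 2.0 (sample covariance with
    # n - 1 in the denominator).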
def __r(self, colA, colB):
'''
Calculate correlation coefficient for two arrays.
Args:
ColA - an 'array' (list) of values
ColB - an 'array' (list) of values
Returns:
Correlation coefficient
'''
assert len(colA) == len(colB), \
f'Arrays are of unequal length ({len(colA), len(colB)}'
covar = self.__covar(colA, colB)
stdA = self.__mvs(colA)['standard_deviation']
stdB = self.__mvs(colB)['standard_deviation']
return covar / (stdA * stdB)
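    # Example: __r([1, 2, 3], [2, 4, 6]) -> 1.0, a perfect positive
    # correlation.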
####### Public functions
def get_dataframe(self):
'''
Returns the data set as a Pandas DataFrame
'''
return pd.DataFrame(self.data_set)
def statistics(self):
'''
        Gets basic statistics for the data set. Returns them as a Pandas
DataFrame
'''
self.stats = {}
for column in self.data_set.keys():
self.stats[column] = self.__mvs(self.data_set[column])
return pd.DataFrame(self.stats).T
def covariance(self):
'''
Calculates variances/covariances between arrays. Returns variance/covariance
matrix as Pandas Dataframe
'''
# calculate covariance/variation of each permutation of the two arrays
# building the matrix
self.cov_matrix = []
for i in self.data_set.values():
row = []
for j in self.data_set.values():
# Find covariance
row.append(self.__covar(i, j))
self.cov_matrix.append(row)
labels = self.data_set.keys()
return pd.DataFrame(self.cov_matrix, columns=labels, index=labels)
def correlation(self):
'''
Calculates correlation between arrays. Returns correlation
matrix as Pandas Dataframe
'''
# calculate correlations of each permutation of the two arrays, building
# the matrix
self.cor_matrix = []
for i in self.data_set.values():
row = []
for j in self.data_set.values():
# Find r (pearson's correlation)
row.append(self.__r(i, j))
self.cor_matrix.append(row)
labels = self.data_set.keys()
return pd.DataFrame(self.cor_matrix, columns=labels, index=labels)
if __name__ == '__main__':
# Very quick unit tests to see if all the functions run and compare
# outputs with the equivalent numpy functions
from IPython.display import display
import numpy as np
# Some toy data
sales = [3505, 2400, 3027, 2798, 3700, 3250, 2689]
customers = [127, 80, 105, 92, 120, 115, 93]
# Instantiate
data = DataSet([sales, customers], ['sales', 'customers'])
#data = DataSet([sales, customers])
# Test case 1 -- Try getting dataframe
print('Get dataframe')
display(data.get_dataframe())
# Test case 2 -- And basic statistics
print('\nGet descriptive statistics')
display(data.statistics())
# Test results against numpy equivalents. Print any discrepancies.
passes = True
for col in data.stats.keys():
if data.stats[col]['mean'] != np.mean(data.data_set[col]):
passes = False
print(f'\'{col}\': means different\n')
print(f'Output: {data.stats[col]["mean"]}')
            print(f'Actual: {np.mean(data.data_set[col])}')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from perlin import generate_perlin
def gaussian_2d_fast(size, amp, mu_x, mu_y, sigma):
x = np.arange(0, 1, 1/size[0])
y = np.arange(0, 1, 1/size[1])
xs, ys = np.meshgrid(x,y)
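    # Wrapped (toroidal) distances: take the shorter way around the unit
    # square so a Gaussian spills across the periodic domain edges.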
dxs = np.minimum(np.abs(xs-mu_x), 1-np.abs(xs-mu_x))
dys = np.minimum(np.abs(ys-mu_y), 1-np.abs(ys-mu_y))
heat_map = amp*np.exp(-(dxs**2+dys**2)/(2*sigma**2))
return heat_map
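# Example (hypothetical values): a bump centred near the right edge of the
# unit square wraps around to the left side of the grid:
#     hm = gaussian_2d_fast((120, 120), 1.0, 0.98, 0.5, 0.05)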
def excitability_matrix(sigma_e, sigma_i, perlin_scale, grid_offset,
p_e=0.05, p_i=0.05, we=0.22, g=4,
n_row_e=120, n_row_i=60, mu_gwn=0, multiple_connections=True,
expected_connectivity=True, is_plot=True):
n_pop_e = n_row_e**2
n_pop_i = n_row_i**2
gL = 25 * 1e-9 # Siemens
p_max_e = p_e / (2 * np.pi * sigma_e**2)
p_max_i = p_i / (2 * np.pi * sigma_i**2)
# Two landscapes: e and i. The contribution of each neuron is stored separately in the n_row_e**2 matrices
e_landscape = np.zeros((n_row_e**2, n_row_e, n_row_e))
i_landscape = np.zeros((n_row_i**2, n_row_e, n_row_e))
perlin = generate_perlin(n_row_e, perlin_scale, seed_value=0)
x = np.arange(0,1,1/n_row_e)
y = np.arange(0,1,1/n_row_e)
X, Y = np.meshgrid(x,y)
U = np.cos(perlin)
V = np.sin(perlin)
# Excitatory
mu_xs = np.arange(0,1,1/n_row_e)
mu_ys = np.arange(0,1,1/n_row_e)
counter = 0
for i, mu_x in enumerate(mu_xs):
for j, mu_y in enumerate(mu_ys):
x_offset = grid_offset / n_row_e * np.cos(perlin[i,j])
y_offset = grid_offset / n_row_e * np.sin(perlin[i,j])
mh = gaussian_2d_fast((n_row_e, n_row_e), p_max_e, mu_x+x_offset, mu_y+y_offset, sigma_e)
if not multiple_connections:
#clip probabilities at 1
e_landscape[counter] = np.minimum(mh, np.ones(mh.shape))
else:
e_landscape[counter] = mh
counter += 1
# Inhibitory
mu_xs = np.arange(1/n_row_e,1+1/n_row_e,1/n_row_i)
mu_ys = np.arange(1/n_row_e,1+1/n_row_e,1/n_row_i)
counter = 0
for mu_x in mu_xs:
for mu_y in mu_ys:
mh = gaussian_2d_fast((n_row_e, n_row_e), p_max_i, mu_x, mu_y, sigma_i)
if not multiple_connections:
#clip probabilities at 1
i_landscape[counter] = np.minimum(mh, np.ones(mh.shape))
else:
i_landscape[counter] = mh
counter += 1
# in total there should be n_pop_e * (n_pop_e * p_max_e) = 10 368 000 e-connections
# and n_pop_i * (n_pop_e * 0.05) = 2 592 000 i-connections
num_e_connections = np.sum(e_landscape)
num_i_connections = np.sum(i_landscape)
if multiple_connections:
e_calibration = 1
i_calibration = 1
else:
e_calibration = n_pop_e * n_pop_e * p_e / num_e_connections
i_calibration = n_pop_i * n_pop_e * p_i / num_i_connections
print('e_calibration is ', e_calibration)
print('i_calibration is ', i_calibration)
if expected_connectivity:
# calculate expected number of connections
e_landscape = n_row_e**2*np.mean(e_landscape, axis=0)
i_landscape = n_row_i**2*np.mean(i_landscape, axis=0)
else: # we sample
sample_e_landscape = np.zeros((n_row_e, n_row_e))
for i in range(n_row_e):
for j in range(n_row_e):
neuron = e_landscape[:, i, j]
random_numbers = np.random.random(n_row_e**2)
num_connected = len(np.where(random_numbers<neuron)[0])
sample_e_landscape[i, j] = num_connected
sample_i_landscape = np.zeros((n_row_e, n_row_e))
for i in range(n_row_e):
for j in range(n_row_e):
neuron = i_landscape[:, i, j]
random_numbers = np.random.random(n_row_i**2)
num_connected = len(np.where(random_numbers<neuron)[0])
sample_i_landscape[i, j] = num_connected
e_landscape = sample_e_landscape
i_landscape = sample_i_landscape
# Now we fill a landscape with physical units (mV)
rest_pot = -70 # mV
thres_pot = -55 # mV
ext_pot = mu_gwn / gL * 1e3 #mV
no_activity_pot = rest_pot + ext_pot # -56 mV when mu_gwn = 350 pA
landscape = no_activity_pot * np.ones((n_row_e, n_row_e))
# Synapse strengths
we = we * e_calibration #mV
wi = -g * we * i_calibration / e_calibration #mV
landscape += we * e_landscape
landscape += wi * i_landscape
# scale X and Y quiver according to values in ei_landscape. first normalize landscape
norm_landscape = np.copy(landscape)
norm_landscape -= np.amin(norm_landscape)
norm_landscape /= np.amax(norm_landscape)
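    # Min-max normalisation to [0, 1] so quiver arrow lengths reflect
    # relative excitability across the grid.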
U = 0.5*np.multiply(U, norm_landscape)
    V = 0.5*np.multiply(V, norm_landscape)
import os
import copy
import numpy as np
from itertools import groupby
from .utils_def import totim_to_datetime
from . import import_optional_dependency
class ZoneBudget:
"""
ZoneBudget class
Parameters
----------
cbc_file : str or CellBudgetFile object
The file name or CellBudgetFile object for which budgets will be
computed.
z : ndarray
        The array containing the zones to be used.
kstpkper : tuple of ints
A tuple containing the time step and stress period (kstp, kper).
The kstp and kper values are zero based.
totim : float
The simulation time.
aliases : dict
A dictionary with key, value pairs of zones and aliases. Replaces
the corresponding record and field names with the aliases provided.
When using this option in conjunction with a list of zones, the
zone(s) passed may either be all strings (aliases), all integers,
or mixed.
Returns
-------
None
Examples
--------
>>> from flopy.utils.zonbud import ZoneBudget
>>> zon = ZoneBudget.read_zone_file('zone_input_file')
>>> zb = ZoneBudget('zonebudtest.cbc', zon, kstpkper=(0, 0))
>>> zb.to_csv('zonebudtest.csv')
>>> zb_mgd = zb * 7.48052 / 1000000
"""
def __init__(
self,
cbc_file,
z,
kstpkper=None,
totim=None,
aliases=None,
verbose=False,
**kwargs,
):
from .binaryfile import CellBudgetFile
if isinstance(cbc_file, CellBudgetFile):
self.cbc = cbc_file
elif isinstance(cbc_file, str) and os.path.isfile(cbc_file):
self.cbc = CellBudgetFile(cbc_file)
else:
raise Exception(f"Cannot load cell budget file: {cbc_file}.")
if isinstance(z, np.ndarray):
assert np.issubdtype(
z.dtype, np.integer
), "Zones dtype must be integer"
else:
e = (
"Please pass zones as a numpy ndarray of (positive)"
" integers. {}".format(z.dtype)
)
raise Exception(e)
# Check for negative zone values
if np.any(z < 0):
raise Exception(
"Negative zone value(s) found:", np.unique(z[z < 0])
)
self.dis = None
if "model" in kwargs.keys():
self.model = kwargs.pop("model")
self.dis = self.model.dis
if "dis" in kwargs.keys():
self.dis = kwargs.pop("dis")
if len(kwargs.keys()) > 0:
args = ",".join(kwargs.keys())
raise Exception(f"LayerFile error: unrecognized kwargs: {args}")
# Check the shape of the cbc budget file arrays
self.cbc_shape = self.cbc.get_data(idx=0, full3D=True)[0].shape
self.nlay, self.nrow, self.ncol = self.cbc_shape
self.cbc_times = self.cbc.get_times()
self.cbc_kstpkper = self.cbc.get_kstpkper()
self.kstpkper = None
self.totim = None
if kstpkper is not None:
if isinstance(kstpkper, tuple):
kstpkper = [kstpkper]
for kk in kstpkper:
s = f"The specified time step/stress period does not exist {kk}"
assert kk in self.cbc.get_kstpkper(), s
self.kstpkper = kstpkper
elif totim is not None:
if isinstance(totim, float):
totim = [totim]
elif isinstance(totim, int):
totim = [float(totim)]
for t in totim:
s = f"The specified simulation time does not exist {t}"
assert t in self.cbc.get_times(), s
self.totim = totim
else:
            # No time step/stress period or simulation time passed
self.kstpkper = self.cbc.get_kstpkper()
# Set float and integer types
self.float_type = np.float32
self.int_type = np.int32
# Check dimensions of input zone array
s = (
"Row/col dimensions of zone array {}"
" do not match model row/col dimensions {}".format(
z.shape, self.cbc_shape
)
)
assert z.shape[-2] == self.nrow and z.shape[-1] == self.ncol, s
if z.shape == self.cbc_shape:
izone = z.copy()
elif len(z.shape) == 2:
izone = np.zeros(self.cbc_shape, self.int_type)
izone[:] = z[:, :]
elif len(z.shape) == 3 and z.shape[0] == 1:
izone = np.zeros(self.cbc_shape, self.int_type)
izone[:] = z[0, :, :]
else:
e = f"Shape of the zone array is not recognized: {z.shape}"
raise Exception(e)
self.izone = izone
self.allzones = np.unique(izone)
self._zonenamedict = {z: f"ZONE_{z}" for z in self.allzones}
if aliases is not None:
s = (
"Input aliases not recognized. Please pass a dictionary "
"with key,value pairs of zone/alias."
)
assert isinstance(aliases, dict), s
# Replace the relevant field names (ignore zone 0)
seen = []
for z, a in iter(aliases.items()):
if z != 0 and z in self._zonenamedict.keys():
if z in seen:
raise Exception(
"Zones may not have more than 1 alias."
)
self._zonenamedict[z] = "_".join(a.split())
seen.append(z)
# self._iflow_recnames = self._get_internal_flow_record_names()
# All record names in the cell-by-cell budget binary file
self.record_names = [
n.strip() for n in self.cbc.get_unique_record_names(decode=True)
]
# Get imeth for each record in the CellBudgetFile record list
self.imeth = {}
for record in self.cbc.recordarray:
self.imeth[record["text"].strip().decode("utf-8")] = record[
"imeth"
]
# INTERNAL FLOW TERMS ARE USED TO CALCULATE FLOW BETWEEN ZONES.
# CONSTANT-HEAD TERMS ARE USED TO IDENTIFY WHERE CONSTANT-HEAD CELLS
# ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF FLOW.
# SWIADDTO--- terms are used by the SWI2 groundwater flow process.
internal_flow_terms = [
"CONSTANT HEAD",
"FLOW RIGHT FACE",
"FLOW FRONT FACE",
"FLOW LOWER FACE",
"SWIADDTOCH",
"SWIADDTOFRF",
"SWIADDTOFFF",
"SWIADDTOFLF",
]
# Source/sink/storage term record names
# These are all of the terms that are not related to constant
# head cells or face flow terms
self.ssst_record_names = [
n for n in self.record_names if n not in internal_flow_terms
]
# Initialize budget recordarray
array_list = []
if self.kstpkper is not None:
for kk in self.kstpkper:
recordarray = self._initialize_budget_recordarray(
kstpkper=kk, totim=None
)
array_list.append(recordarray)
elif self.totim is not None:
for t in self.totim:
recordarray = self._initialize_budget_recordarray(
kstpkper=None, totim=t
)
array_list.append(recordarray)
self._budget = np.concatenate(array_list, axis=0)
# Update budget record array
if self.kstpkper is not None:
for kk in self.kstpkper:
if verbose:
s = (
"Computing the budget for"
" time step {} in stress period {}".format(
kk[0] + 1, kk[1] + 1
)
)
print(s)
self._compute_budget(kstpkper=kk)
elif self.totim is not None:
for t in self.totim:
if verbose:
s = f"Computing the budget for time {t}"
print(s)
self._compute_budget(totim=t)
def _compute_budget(self, kstpkper=None, totim=None):
"""
Creates a budget for the specified zone array. This function only
supports the use of a single time step/stress period or time.
Parameters
----------
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
None
"""
# Initialize an array to track where the constant head cells
# are located.
ich = np.zeros(self.cbc_shape, self.int_type)
swiich = np.zeros(self.cbc_shape, self.int_type)
if "CONSTANT HEAD" in self.record_names:
"""
C-----CONSTANT-HEAD FLOW -- DON'T ACCUMULATE THE CELL-BY-CELL VALUES FOR
C-----CONSTANT-HEAD FLOW BECAUSE THEY MAY INCLUDE PARTIALLY CANCELING
C-----INS AND OUTS. USE CONSTANT-HEAD TERM TO IDENTIFY WHERE CONSTANT-
C-----HEAD CELLS ARE AND THEN USE FACE FLOWS TO DETERMINE THE AMOUNT OF
C-----FLOW. STORE CONSTANT-HEAD LOCATIONS IN ICH ARRAY.
"""
chd = self.cbc.get_data(
text="CONSTANT HEAD",
full3D=True,
kstpkper=kstpkper,
totim=totim,
)[0]
ich[np.ma.where(chd != 0.0)] = 1
if "FLOW RIGHT FACE" in self.record_names:
self._accumulate_flow_frf("FLOW RIGHT FACE", ich, kstpkper, totim)
if "FLOW FRONT FACE" in self.record_names:
self._accumulate_flow_fff("FLOW FRONT FACE", ich, kstpkper, totim)
if "FLOW LOWER FACE" in self.record_names:
self._accumulate_flow_flf("FLOW LOWER FACE", ich, kstpkper, totim)
if "SWIADDTOCH" in self.record_names:
swichd = self.cbc.get_data(
text="SWIADDTOCH", full3D=True, kstpkper=kstpkper, totim=totim
)[0]
swiich[swichd != 0] = 1
if "SWIADDTOFRF" in self.record_names:
self._accumulate_flow_frf("SWIADDTOFRF", swiich, kstpkper, totim)
if "SWIADDTOFFF" in self.record_names:
self._accumulate_flow_fff("SWIADDTOFFF", swiich, kstpkper, totim)
if "SWIADDTOFLF" in self.record_names:
self._accumulate_flow_flf("SWIADDTOFLF", swiich, kstpkper, totim)
# NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE
# ACCUMULATE THE FLOW BY ZONE
# iterate over remaining items in the list
for recname in self.ssst_record_names:
self._accumulate_flow_ssst(recname, kstpkper, totim)
# Compute mass balance terms
self._compute_mass_balance(kstpkper, totim)
return
def _add_empty_record(
self, recordarray, recname, kstpkper=None, totim=None
):
"""
        Build an empty record based on the specified flow direction and
record name for the given list of zones.
Parameters
----------
recordarray :
recname :
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
recordarray : np.recarray
"""
if kstpkper is not None:
if len(self.cbc_times) > 0:
totim = self.cbc_times[self.cbc_kstpkper.index(kstpkper)]
else:
totim = 0.0
elif totim is not None:
if len(self.cbc_times) > 0:
kstpkper = self.cbc_kstpkper[self.cbc_times.index(totim)]
else:
kstpkper = (0, 0)
row = [totim, kstpkper[0], kstpkper[1], recname]
row += [0.0 for _ in self._zonenamedict.values()]
recs = np.array(tuple(row), dtype=recordarray.dtype)
recordarray = np.append(recordarray, recs)
return recordarray
def _initialize_budget_recordarray(self, kstpkper=None, totim=None):
"""
Initialize the budget record array which will store all of the
fluxes in the cell-budget file.
Parameters
----------
kstpkper : tuple
Tuple of kstp and kper to compute budget for (default is None).
totim : float
Totim to compute budget for (default is None).
Returns
-------
"""
# Create empty array for the budget terms.
dtype_list = [
("totim", "<f4"),
("time_step", "<i4"),
("stress_period", "<i4"),
("name", (str, 50)),
]
dtype_list += [
(n, self.float_type) for n in self._zonenamedict.values()
]
dtype = np.dtype(dtype_list)
recordarray = np.array([], dtype=dtype)
# Add "from" records
if "STORAGE" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "FROM_STORAGE", kstpkper, totim
)
if "CONSTANT HEAD" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "FROM_CONSTANT_HEAD", kstpkper, totim
)
for recname in self.ssst_record_names:
if recname != "STORAGE":
recordarray = self._add_empty_record(
recordarray,
"FROM_" + "_".join(recname.split()),
kstpkper,
totim,
)
for z, n in self._zonenamedict.items():
if z == 0 and 0 not in self.allzones:
continue
else:
recordarray = self._add_empty_record(
recordarray, "FROM_" + "_".join(n.split()), kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "TOTAL_IN", kstpkper, totim
)
# Add "out" records
if "STORAGE" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "TO_STORAGE", kstpkper, totim
)
if "CONSTANT HEAD" in self.record_names:
recordarray = self._add_empty_record(
recordarray, "TO_CONSTANT_HEAD", kstpkper, totim
)
for recname in self.ssst_record_names:
if recname != "STORAGE":
recordarray = self._add_empty_record(
recordarray,
"TO_" + "_".join(recname.split()),
kstpkper,
totim,
)
for z, n in self._zonenamedict.items():
if z == 0 and 0 not in self.allzones:
continue
else:
recordarray = self._add_empty_record(
recordarray, "TO_" + "_".join(n.split()), kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "TOTAL_OUT", kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "IN-OUT", kstpkper, totim
)
recordarray = self._add_empty_record(
recordarray, "PERCENT_DISCREPANCY", kstpkper, totim
)
return recordarray
@staticmethod
def _filter_circular_flow(fz, tz, f):
"""
Parameters
----------
fz
tz
f
Returns
-------
"""
e = np.equal(fz, tz)
fz = fz[np.logical_not(e)]
tz = tz[np.logical_not(e)]
f = f[np.logical_not(e)]
return fz, tz, f
def _update_budget_fromfaceflow(
self, fz, tz, f, kstpkper=None, totim=None
):
"""
Parameters
----------
fz
tz
f
kstpkper
totim
Returns
-------
"""
# No circular flow within zones
fz, tz, f = self._filter_circular_flow(fz, tz, f)
if len(f) == 0:
return
# Inflows
idx = tz != 0
fzi = fz[idx]
tzi = tz[idx]
rownames = ["FROM_" + self._zonenamedict[z] for z in fzi]
colnames = [self._zonenamedict[z] for z in tzi]
fluxes = f[idx]
self._update_budget_recordarray(
rownames, colnames, fluxes, kstpkper, totim
)
# Outflows
idx = fz != 0
fzi = fz[idx]
tzi = tz[idx]
rownames = ["TO_" + self._zonenamedict[z] for z in tzi]
colnames = [self._zonenamedict[z] for z in fzi]
fluxes = f[idx]
self._update_budget_recordarray(
rownames, colnames, fluxes, kstpkper, totim
)
return
def _update_budget_fromssst(self, fz, tz, f, kstpkper=None, totim=None):
"""
Parameters
----------
fz
tz
f
kstpkper
totim
Returns
-------
"""
if len(f) == 0:
return
self._update_budget_recordarray(fz, tz, f, kstpkper, totim)
return
def _update_budget_recordarray(
self, rownames, colnames, fluxes, kstpkper=None, totim=None
):
"""
Update the budget record array with the flux for the specified
flow direction (in/out), record name, and column.
Parameters
----------
rownames
colnames
fluxes
kstpkper
totim
Returns
-------
None
"""
try:
if kstpkper is not None:
for rn, cn, flux in zip(rownames, colnames, fluxes):
rowidx = np.where(
(self._budget["time_step"] == kstpkper[0])
& (self._budget["stress_period"] == kstpkper[1])
& (self._budget["name"] == rn)
)
self._budget[cn][rowidx] += flux
elif totim is not None:
for rn, cn, flux in zip(rownames, colnames, fluxes):
rowidx = np.where(
(self._budget["totim"] == totim)
& (self._budget["name"] == rn)
)
self._budget[cn][rowidx] += flux
except Exception as e:
print(e)
raise
return
def _accumulate_flow_frf(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.ncol >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW RIGHT FACE" COMPUTE FLOW BETWEEN ZONES ACROSS COLUMNS.
# COMPUTE FLOW ONLY BETWEEN A ZONE AND A HIGHER ZONE -- FLOW FROM
# ZONE 4 TO 3 IS THE NEGATIVE OF FLOW FROM 3 TO 4.
# 1ST, CALCULATE FLOW BETWEEN NODE J,I,K AND J-1,I,K
k, i, j = np.where(
self.izone[:, :, 1:] > self.izone[:, :, :-1]
)
# Adjust column values to account for the starting position of "nz"
j += 1
# Define the zone to which flow is going
nz = self.izone[k, i, j]
# Define the zone from which flow is coming
jl = j - 1
nzl = self.izone[k, i, jl]
# Get the face flow
q = data[k, i, jl]
# Get indices where flow face values are positive (flow out of higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# Get indices where flow face values are negative (flow into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# FLOW BETWEEN NODE J,I,K AND J+1,I,K
k, i, j = np.where(
self.izone[:, :, :-1] > self.izone[:, :, 1:]
)
# Define the zone from which flow is coming
nz = self.izone[k, i, j]
# Define the zone to which flow is going
jr = j + 1
nzr = self.izone[k, i, jr]
# Get the face flow
q = data[k, i, j]
# Get indices where flow face values are positive (flow out of higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# Get indices where flow face values are negative (flow into higher zone)
# Don't include CH to CH flow (can occur if CHTOCH option is used)
# Create an iterable tuple of (from zone, to zone, flux)
# Then group tuple by (from_zone, to_zone) and sum the flux values
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[j > 0], i[j > 0], j[j > 0]
jl = j - 1
nzl = self.izone[k, i, jl]
nz = self.izone[k, i, j]
q = data[k, i, jl]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi[tzi != 0]]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[j < self.ncol - 1],
i[j < self.ncol - 1],
j[j < self.ncol - 1],
)
nz = self.izone[k, i, j]
jr = j + 1
nzr = self.izone[k, i, jr]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_fff(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.nrow >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW FRONT FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K
k, i, j = np.where(
self.izone[:, 1:, :] < self.izone[:, :-1, :]
)
i += 1
ia = i - 1
nza = self.izone[k, ia, j]
nz = self.izone[k, i, j]
q = data[k, ia, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K.
k, i, j = np.where(
self.izone[:, :-1, :] < self.izone[:, 1:, :]
)
nz = self.izone[k, i, j]
ib = i + 1
nzb = self.izone[k, ib, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[i > 0], i[i > 0], j[i > 0]
ia = i - 1
nza = self.izone[k, ia, j]
nz = self.izone[k, i, j]
q = data[k, ia, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[i < self.nrow - 1],
i[i < self.nrow - 1],
j[i < self.nrow - 1],
)
nz = self.izone[k, i, j]
ib = i + 1
nzb = self.izone[k, ib, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_flf(self, recname, ich, kstpkper, totim):
"""
Parameters
----------
recname
ich
kstpkper
totim
Returns
-------
"""
try:
if self.nlay >= 2:
data = self.cbc.get_data(
text=recname, kstpkper=kstpkper, totim=totim
)[0]
# "FLOW LOWER FACE"
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1
k, i, j = np.where(
self.izone[1:, :, :] < self.izone[:-1, :, :]
)
k += 1
ka = k - 1
nza = self.izone[ka, i, j]
nz = self.izone[k, i, j]
q = data[ka, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1
k, i, j = np.where(
self.izone[:-1, :, :] < self.izone[1:, :, :]
)
nz = self.izone[k, i, j]
kb = k + 1
nzb = self.izone[kb, i, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
self._update_budget_fromfaceflow(
fzi, tzi, np.abs(fi), kstpkper, totim
)
# CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION
k, i, j = np.where(ich == 1)
k, i, j = k[k > 0], i[k > 0], j[k > 0]
ka = k - 1
nza = self.izone[ka, i, j]
nz = self.izone[k, i, j]
q = data[ka, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
k, i, j = np.where(ich == 1)
k, i, j = (
k[k < self.nlay - 1],
i[k < self.nlay - 1],
j[k < self.nlay - 1],
)
nz = self.izone[k, i, j]
kb = k + 1
nzb = self.izone[kb, i, j]
q = data[k, i, j]
idx = np.where(
(q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["FROM_CONSTANT_HEAD"] * len(tzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
idx = np.where(
(q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1))
)
fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx])
fz = ["TO_CONSTANT_HEAD"] * len(fzi)
tz = [self._zonenamedict[z] for z in tzi]
self._update_budget_fromssst(
fz, tz, np.abs(f), kstpkper, totim
)
except Exception as e:
print(e)
raise
return
def _accumulate_flow_ssst(self, recname, kstpkper, totim):
# NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE
# ACCUMULATE THE FLOW BY ZONE
imeth = self.imeth[recname]
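        # imeth encodes the record's storage scheme in the cell-budget file:
        # 2/5 -> a list of (node, q) entries, 0/1 -> a full 3-D array; the
        # branches below unpack each into inflow/outflow arrays.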
data = self.cbc.get_data(text=recname, kstpkper=kstpkper, totim=totim)
if len(data) == 0:
# Empty data, can occur during the first time step of a transient
# model when storage terms are zero and not in the cell-budget
# file.
return
else:
data = data[0]
if imeth == 2 or imeth == 5:
# LIST
qin = np.ma.zeros(
(self.nlay * self.nrow * self.ncol), self.float_type
)
qout = np.ma.zeros(
(self.nlay * self.nrow * self.ncol), self.float_type
)
for [node, q] in zip(data["node"], data["q"]):
idx = node - 1
if q > 0:
qin.data[idx] += q
elif q < 0:
qout.data[idx] += q
qin = np.ma.reshape(qin, (self.nlay, self.nrow, self.ncol))
qout = np.ma.reshape(qout, (self.nlay, self.nrow, self.ncol))
elif imeth == 0 or imeth == 1:
# FULL 3-D ARRAY
            qin = np.ma.zeros(self.cbc_shape, self.float_type)
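            # (hedged sketch, added) the excerpt is cut off here; a plausible
            # continuation splits the full 3-D array into in/out components:
            qout = np.ma.zeros(self.cbc_shape, self.float_type)
            qin[data > 0] = data[data > 0]
            qout[data < 0] = data[data < 0]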
import pickle
import numpy as np
def fetch_file(path):
with open(path, 'rb') as fp:
return pickle.load(fp)
def fetch_adj_mat(column):
if column == 0:
return A1
elif column == 1:
return A2
elif column == 2:
return A3
# elif column == 3:
# return A4
print("Fetching files...")
A1 = np.array(
fetch_file(
"/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A1_fc.txt"))
A2 = np.array(
    fetch_file(
        "/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A2_fc.txt"))
A3 = np.array(
    fetch_file(
        "/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/adjacency_matrix_normal/definition_2/A3_fc.txt"))
# A4 = np.array(fetch_file(RELATIVE_PATH + ADJACENCY_MATRIX + "A4_fc.txt"))
influence_matrix = np.array(fetch_file(
"/home/imlegend19/PycharmProjects/Research - Data Mining/gnome/influence_matrix_normal/definition_2/"
"influence_matrix_fc.txt"))
print(influence_matrix.shape)
krp = []
for i in range(3):
wa1 = A1 * influence_matrix[i][0]
wa2 = A2 * influence_matrix[i][1]
wa3 = A3 * influence_matrix[i][2]
# wa4 = A4 * influence_matrix_normal[i][3]
print(influence_matrix[i][0])
print(influence_matrix[i][1])
print(influence_matrix[i][2])
# print(influence_matrix_normal[i][3])
for j in range(1134):
row = []
row.extend(wa1[j])
row.extend(wa2[j])
row.extend(wa3[j])
# row.extend(wa4[j])
krp.append(row)
print("Clearing variables...")
A1 = None
A2 = None
A3 = None
# A4 = None
influence_matrix = None
print("Setting up kr_product...")
kr_product = np.array(krp, dtype=float)
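# --- Vectorized sketch (added; not part of the original script) ---
# The loops above place, for each influence row i, the three weighted
# adjacency blocks side by side and stack those wide rows vertically.
# Assuming square blocks, the same result comes from hstack/vstack; the
# parameters here mirror the module-level names used above.
def build_kr_product(A1, A2, A3, influence_matrix):
    blocks = [
        np.hstack((A1 * influence_matrix[i][0],
                   A2 * influence_matrix[i][1],
                   A3 * influence_matrix[i][2]))
        for i in range(3)
    ]
    return np.vstack(blocks).astype(float)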
"""
Experimental SOM-like clustering that works completely pixel-to-gene
"""
import random, math
import operator
import numpy
import matplotlib.pyplot as plot
from . import config
valid_init_methods = set(["random", ])
valid_metrics = set(["sum_of_deltas", ])
class rigidgrid:
def __init__(self, numpy_data):
"""
**Purpose**
Setup the rigid grid
**Arguments**
numpy_data
Table of expression data, rows are genes (pixels), columns are samples
"""
self.data = numpy_data
def init(self, method="random", iterations=10, metric='sum_of_deltas', seed=1234):
"""
**Purpose**
Initialise the starting setting for the RG.
**Arguments**
method (Optional, default=random)
Specify the method to determine the initial starting conditions
Valid methods:
random - randomly arrange the points in the grid.
seed (Optional, default=1234)
seed for the random number generator (if required)
            iterations (Optional, default=10)
                number of iterations to plow through
            metric (Optional, default='sum_of_deltas')
                metric to use to compare between two pixels for similarity.
                Valid metrics:
                    sum_of_deltas - sum of the differences.
"""
assert method in valid_init_methods, "'%s' method not found" % method
assert metric in valid_metrics, "'%s' metric not found" % metric
random.seed(seed)
self.method = method
self.iterations = iterations
self.metric = metric
self.sq = int(math.ceil(math.sqrt(self.data.shape[0]))) # The size of the square grid required
self.samples_required = self.sq * self.sq
config.log.info("rigidgrid(): grid set to %s*%s" % (self.sq, self.sq))
self.grid = numpy.zeros([self.sq, self.sq], dtype=int) # idxs of current genes
if len(self.data) < self.samples_required:
to_add = self.samples_required - len(self.data)
config.log.info("rigidgrid(): I need to add %s pseudo samples to form a perfect square" % to_add)
self.data = numpy.vstack((self.data, | numpy.zeros([to_add, self.data.shape[1]]) | numpy.zeros |
"""
Finds faces of 3d form, given the perimeter and foldlines
"""
#From http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
import math
import numpy
def dotproduct(v1, v2):
return sum((a*b) for a, b in zip(v1, v2))
def length(v):
return math.sqrt(dotproduct(v, v))
def calc_angle(v1, v2):
return math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))
#From http://stackoverflow.com/questions/20023209/python-function-for-rotating-2d-objects
def rotatePolygon(polygon,theta):
"""Rotates the given polygon which consists of corners represented as (x,y),
around the ORIGIN, clock-wise, theta degrees"""
theta = math.radians(theta)
if polygon[polygon.index([0,0])+1][1]>0:
theta=-theta
rotatedPolygon = []
for corner in polygon :
rotatedPolygon.append(( corner[0]*math.cos(theta)-corner[1]*math.sin(theta) , corner[0]*math.sin(theta)+corner[1]*math.cos(theta)) )
return rotatedPolygon
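# --- Worked example (added; not part of the original script) ---
# calc_angle and rotatePolygon in action: the angle between (1, 0) and
# (0, 1) is pi/2 (~1.5708 rad), and rotating a unit square 90 degrees
# about the origin maps (1, 0) onto the y-axis (up to float noise).
def _demo_rotation():
    right_angle = calc_angle([1, 0], [0, 1])
    square = [[0, 0], [1, 0], [1, 1], [0, 1]]
    rotated = rotatePolygon(square, 90)
    return right_angle, rotated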
def face_finder(maincontour, foldlines):
#takes in a list of points of the main contour, and a list of point pairs giving fold lines
#Flip fold lines, work out why
    for index, foldline in enumerate(foldlines):
        foldlines[index]=[foldline[1],foldline[0]]
#dictionary of links, both contour and folded
connectionDict={}
    for index, point in enumerate(maincontour):
        connections=[tuple(maincontour[index-1]),tuple(maincontour[(index+1)%(len(maincontour))])]
        for line in foldlines:
            if point in line:
                connections.insert(0,tuple(line[line.index(point)-1]))
        connectionDict[point]=connections
#collect a list of all faces (with duplicates)
faceLists=[]
for point in maincontour:
        face=(breadth_first(point, connectionDict)[:-1]) #for some reason breadth-first search includes the start point twice. Easiest fix!
faceLists.append(face)
#uses set notation to drop extras
culledFaceLists=[]
faceSets=[]
for face in faceLists:
if set(face) not in faceSets:
faceSets.append(set(face))
culledFaceLists.append(face)
rotatedFaceLists=[] #the normalized and translated faces
    regularFaceLists=[] #the original lists, but flipped if needed
#count fold lines to see if it's a base or not
thingsToRemove=[]
for face in culledFaceLists:
faceindex=culledFaceLists.index(face)
totalfoldlines=0
        for index, point2 in enumerate(face):
            point1 = face[index-1]
if [point1,point2] in foldlines or [point2,point1] in foldlines:
totalfoldlines+=1
#If it's a base, FOR NOW just remove it
if totalfoldlines>=2:
thingsToRemove.append(faceindex)
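        # placeholder face (hardcoded in the original source); overwritten
        # below whenever the face has exactly one fold line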
rotatedFace=[(14.129948664096155, 174.25654808571758), (-0.0, 0.0), (182.02472359545007, -8.43769498715119e-15), (191.0729449989362, 179.1734625703078)]
#If it's a regular side
if totalfoldlines == 1:
alreadyRotated=False
            for index, point2 in enumerate(face):
                if alreadyRotated:
                    break
                point1 = face[index-1]
if [point1,point2] in foldlines:
xfactor=point1[0]
yfactor=point1[1]
newpoint2=[point2[0]-xfactor,point2[1]-yfactor]
angle=math.degrees(calc_angle(newpoint2,[1,0]))
newface=[]
for point in face:
newpoint=[point[0]-xfactor,point[1]-yfactor]
newface.append(newpoint)
rotatedFace=rotatePolygon(newface,angle)
alreadyRotated=True
elif [point2,point1] in foldlines:
xfactor=point2[0]
yfactor=point2[1]
newpoint1=[point1[0]-xfactor,point1[1]-yfactor]
angle=math.degrees(calc_angle(newpoint1,[1,0]))
newface=[]
for point in face:
newpoint=[point[0]-xfactor,point[1]-yfactor]
newface.append(newpoint)
rotatedFace=rotatePolygon(newface,angle)
alreadyRotated=True
uprightFace=[]
upsideDown=False
backwards=False
for point in rotatedFace:
newpoint=list(point)
for coord in enumerate(newpoint):
if (coord[1] < 1e-5 and coord[1] > -1e-5):
newpoint[coord[0]]=0
if newpoint[1]<0:
upsideDown=True
if rotatedFace[rotatedFace.index((0.0,0.0))+1][0] < 0:
backwards=True
for point in rotatedFace:
newpoint=list(point)
for coord in enumerate(newpoint):
if (coord[1] < 1e-5 and coord[1] > -1e-5):
newpoint[coord[0]]=0
if backwards:
newx=newpoint[0]*-1
else:
newx = newpoint[0]
if upsideDown:
newy=newpoint[1]*-1
else:
newy=newpoint[1]
newpoint=[newx,newy]
uprightFace.append(newpoint)
rotatedFaceLists.append(uprightFace)
#Set 0,0 and the corresponding point at the correct spot
zeroindex=uprightFace.index([0,0])
shifter=(-zeroindex)
originalShifter=shifter-1
#
rotatedFaceLists[faceindex]=numpy.array(numpy.roll(rotatedFaceLists[faceindex],shifter,axis=0)).tolist()
culledFaceLists[faceindex]=numpy.array(numpy.roll(culledFaceLists[faceindex],originalShifter,axis=0)).tolist()
rotatedFaceLists[faceindex]=[rotatedFaceLists[faceindex][0]]+rotatedFaceLists[faceindex][1:][::-1]
if (upsideDown and backwards) or (not upsideDown and not backwards): #Two opposite direction flips or none
                culledFaceLists[faceindex]=numpy.array(culledFaceLists[faceindex][::-1])
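            # (added) the excerpt is cut off here; presumably the loop then
            # handles the mirrored flip case and the function finally returns
            # the collected faces, e.g.
            #   return culledFaceLists, rotatedFaceLists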
from bw2calc.errors import (
OutsideTechnosphere,
NonsquareTechnosphere,
EmptyBiosphere,
InconsistentGlobalIndex,
)
from bw2calc.lca import LCA
from pathlib import Path
import bw_processing as bwp
import json
import numpy as np
import pytest
from collections.abc import Mapping
fixture_dir = Path(__file__).resolve().parent / "fixtures"
######
### Basic functionality
######
def test_example_db_basic():
mapping = dict(json.load(open(fixture_dir / "bw2io_example_db_mapping.json")))
print(mapping)
packages = [
fixture_dir / "bw2io_example_db.zip",
fixture_dir / "ipcc_simple.zip",
]
lca = LCA(
{mapping["Driving an electric car"]: 1},
data_objs=packages,
)
lca.lci()
lca.lcia()
assert lca.supply_array.sum()
assert lca.technosphere_matrix.sum()
assert lca.score
def test_basic():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
answer = np.zeros((2,))
answer[lca.dicts.activity[101]] = 1
answer[lca.dicts.activity[102]] = 0.5
assert np.allclose(answer, lca.supply_array)
def test_basic_negative_production():
pass
def test_basic_substitution():
pass
def test_basic_nonunitary_production():
pass
def test_circular_inputs():
pass
######
### __init__
######
def test_invalid_datapackage():
packages = ["basic_fixture.zip"]
with pytest.raises(TypeError):
LCA({1: 1}, data_objs=packages)
def test_demand_not_mapping():
packages = [fixture_dir / "basic_fixture.zip"]
with pytest.raises(ValueError):
LCA((1, 1), data_objs=packages)
def test_demand_mapping_but_not_dict():
class M(Mapping):
def __getitem__(self, key):
return 1
def __iter__(self):
return iter((1,))
def __len__(self):
return 1
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA(M(), data_objs=packages)
lca.lci()
answer = np.zeros((2,))
answer[lca.dicts.activity[101]] = 1
answer[lca.dicts.activity[102]] = 0.5
assert np.allclose(answer, lca.supply_array)
######
### __next__
######
def test_next_data_array():
packages = [fixture_dir / "array_sequential.zip"]
lca = LCA({1: 1}, data_objs=packages, use_arrays=True)
lca.lci()
lca.lcia()
for x in range(1, 5):
assert lca.biosphere_matrix.sum() == x
next(lca)
def test_next_only_vectors():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
lca.lcia()
current = lca.characterized_inventory.sum()
next(lca)
assert lca.characterized_inventory.sum() == current
def test_next_plain_monte_carlo():
packages = [
fixture_dir / "mc_basic.zip",
]
mc = LCA({3: 1}, data_objs=packages, use_distributions=True)
mc.lci()
mc.lcia()
first = mc.score
next(mc)
assert first != mc.score
def test_next_monte_carlo_as_iterator():
packages = [
fixture_dir / "mc_basic.zip",
]
mc = LCA({3: 1}, data_objs=packages, use_distributions=True)
mc.lci()
mc.lcia()
for _, _ in zip(mc, range(10)):
assert mc.score > 0
def test_next_monte_carlo_all_matrices_change():
packages = [
fixture_dir / "mc_basic.zip",
]
mc = LCA({3: 1}, data_objs=packages, use_distributions=True)
mc.lci()
mc.lcia()
a = [
mc.technosphere_matrix.sum(),
mc.biosphere_matrix.sum(),
mc.characterization_matrix.sum(),
]
next(mc)
b = [
mc.technosphere_matrix.sum(),
mc.biosphere_matrix.sum(),
mc.characterization_matrix.sum(),
]
print(a, b)
for x, y in zip(a, b):
assert x != y
######
### build_demand_array
######
def test_build_demand_array():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
assert lca.demand_array.shape == (2,)
assert lca.demand_array.sum() == 1
assert lca.demand_array[lca.dicts.product[1]] == 1
def test_build_demand_array_pass_dict():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
lca.build_demand_array({2: 5})
assert lca.demand_array.shape == (2,)
assert lca.demand_array.sum() == 5
assert lca.demand_array[lca.dicts.product[2]] == 5
def test_build_demand_array_outside_technosphere():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({100: 1}, data_objs=packages)
with pytest.raises(OutsideTechnosphere):
lca.lci()
def test_build_demand_array_activity_not_product():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({101: 1}, data_objs=packages)
with pytest.raises(ValueError):
lca.lci()
def test_build_demand_array_pass_object():
packages = [fixture_dir / "basic_fixture.zip"]
class Foo:
pass
obj = Foo()
with pytest.raises(ValueError):
LCA(obj, data_objs=packages)
######
### load_lci_data
######
def test_load_lci_data():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
tm = np.array([[1, 0], [-0.5, 1]])
assert np.allclose(lca.technosphere_matrix.toarray(), tm)
assert lca.dicts.product[1] == 0
assert lca.dicts.product[2] == 1
assert lca.dicts.activity[101] == 0
assert lca.dicts.activity[102] == 1
assert lca.dicts.biosphere[1] == 0
def test_load_lci_data_nonsquare_technosphere():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5, 2, 3])
indices_array = np.array(
[(1, 101), (2, 102), (2, 101), (3, 101), (3, 102)], dtype=bwp.INDICES_DTYPE
)
flip_array = np.array([0, 0, 1, 1, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
lca = LCA({1: 1}, data_objs=[dp])
with pytest.raises(NonsquareTechnosphere):
lca.lci()
# lca.lci()
# tm = np.array([
# [1, 0],
# [-0.5, 1],
# [-2, -3]
# ])
# assert np.allclose(lca.technosphere_matrix.toarray(), tm)
# assert lca.dicts.product[1] == 0
# assert lca.dicts.product[2] == 1
# assert lca.dicts.product[3] == 2
# assert lca.dicts.activity[101] == 0
# assert lca.dicts.activity[102] == 1
def test_load_lci_data_empty_biosphere_warning():
lca = LCA({1: 1}, data_objs=[fixture_dir / "empty_biosphere.zip"])
with pytest.warns(UserWarning):
lca.lci()
######
### remap_inventory_dicts
######
def test_remap_inventory_dicts():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA(
{1: 1},
data_objs=packages,
remapping_dicts={"product": {1: ("foo", "bar")}, "biosphere": {1: "z"}},
)
lca.lci()
lca.remap_inventory_dicts()
tm = np.array([[1, 0], [-0.5, 1]])
assert np.allclose(lca.technosphere_matrix.toarray(), tm)
assert lca.dicts.product[("foo", "bar")] == 0
assert lca.dicts.product[2] == 1
assert lca.dicts.activity[101] == 0
assert lca.dicts.activity[102] == 1
assert lca.dicts.biosphere["z"] == 0
######
### load_lcia_data
######
def test_load_lcia_data():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
lca.lcia()
cm = np.array([[1]])
assert np.allclose(lca.characterization_matrix.toarray(), cm)
def test_load_lcia_data_multiple_characterization_packages():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2, 3])
indices_array = np.array([(1, 101), (2, 102), (3, 101)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1])
indices_array = np.array([(1, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
nrows=1,
)
data_array = np.array([2])
indices_array = np.array([(3, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="second-characterization",
indices_array=indices_array,
global_index=0,
nrows=1,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
cm = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 2]])
assert np.allclose(lca.characterization_matrix.toarray(), cm)
assert lca.dicts.biosphere[1] == 0
assert lca.dicts.biosphere[2] == 1
assert lca.dicts.biosphere[3] == 2
def test_load_lcia_data_inconsistent_globals():
# Activities: 101, 102
# Products: 1, 2
# Biosphere flows: 201, 202
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1])
indices_array = np.array([(201, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
nrows=1,
)
data_array = np.array([10])
indices_array = np.array([(202, 1)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="second-characterization",
indices_array=indices_array,
global_index=1,
nrows=1,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
with pytest.raises(InconsistentGlobalIndex):
lca.lcia()
def test_load_lcia_data_none_global_value():
# Should include all because no filter
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1])
indices_array = np.array([(201, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=None,
nrows=1,
)
data_array = np.array([10])
indices_array = np.array([(202, 1)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="second-characterization",
indices_array=indices_array,
global_index=None,
nrows=1,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
assert lca.characterization_matrix.sum() == 11
def test_load_lcia_data_nonglobal_filtered():
# Activities: 101, 102
# Products: 1, 2
# Biosphere flows: 201, 202
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
    data_array = np.array([1])
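    # (hedged sketch, added) the excerpt is cut off here; a plausible
    # continuation, following the pattern of the tests above: rows whose
    # column index differs from the global index should be filtered out.
    indices_array = np.array([(201, 0)], dtype=bwp.INDICES_DTYPE)
    dp.add_persistent_vector(
        matrix="characterization_matrix",
        data_array=data_array,
        name="first-characterization",
        indices_array=indices_array,
        global_index=0,
        nrows=1,
    )
    data_array = np.array([10])
    indices_array = np.array([(202, 1)], dtype=bwp.INDICES_DTYPE)
    dp.add_persistent_vector(
        matrix="characterization_matrix",
        data_array=data_array,
        name="second-characterization",
        indices_array=indices_array,
        global_index=0,
        nrows=1,
    )
    lca = LCA({1: 1}, data_objs=[dp])
    lca.lci()
    lca.lcia()
    assert lca.characterization_matrix.sum() == 1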
import os.path
import time
import numpy as np
import pickle
import PC2ImageConverter
import matplotlib.pyplot as plt
from visualizer import Vis
def decomposeCloud(rawCloud, verbose=False):
# decompose cloud
backgrdPoints = []
roadPoints = []
vehPoints = []
pedPoints = []
cycPoints = []
for i in range(0, len(rawCloud)):
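        # column 4 of each point row is assumed to hold the class label string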
objClass = rawCloud[i, 4]
if objClass == "road":
roadPoints.append(rawCloud[i,:])
elif objClass == "car":
vehPoints.append(rawCloud[i,:])
elif objClass == "person":
pedPoints.append(rawCloud[i,:])
elif objClass == "cyclist":
cycPoints.append(rawCloud[i,:])
elif objClass == "None":
backgrdPoints.append(rawCloud[i,:])
backgrdCloud = np.asarray(backgrdPoints)
roadCloud = np.asarray(roadPoints)
vehCloud = np.asarray(vehPoints)
pedCloud = np.asarray(pedPoints)
cycCloud = np.asarray(cycPoints)
if verbose:
print ("background cloud: " + str(backgrdCloud.shape))
print ("roadCloud cloud: " + str(roadCloud.shape))
print ("vehCloud cloud: " + str(vehCloud.shape))
print ("pedCloud cloud: " + str(pedCloud.shape))
print ("cycCloud cloud: " + str(cycCloud.shape))
return backgrdCloud, roadCloud, vehCloud, pedCloud, cycCloud
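# --- Vectorized sketch (added; not part of the original script) ---
# Boolean masking can replace the per-point loop above, assuming rawCloud
# is a numpy array whose fifth column holds the label strings:
def decomposeCloudFast(rawCloud):
    labels = rawCloud[:, 4]
    order = ("None", "road", "car", "person", "cyclist")
    # same ordering as decomposeCloud: background, road, veh, ped, cyc
    return tuple(rawCloud[labels == name] for name in order)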
def loadBoundingBox(boundingBox):
with open(boundingBox,'rb') as f:
return pickle.load(f,encoding='bytes')
def parseBB3D(curr_path, bb3D_path):
'''
    boundingbox_3d : n * [ label_type,
                           [ [x1, x2, x3, x4, x5, x6, x7, x8],
                             [y1, y2, y3, y4, y5, y6, y7, y8],
                             [z1, z2, z3, z4, z5, z6, z7, z8] ] ]
    For each bounding box, x, y, z are in image coordinates.
'''
pathName, tempName = os.path.split(curr_path)
currFileName, _ = tempName.split(".")
bbFileName = bb3D_path + currFileName.replace('full_label', 'bb3d') + ".bin"
print(bbFileName)
boundingbox_3d = []
if os.path.exists(bbFileName):
boundingbox_3d = loadBoundingBox(bbFileName)
else:
print ("ERROR: BB_3D file does not exist " + str(bbFileName))
return None
return boundingbox_3d
def insertLabelColumn(inputCloud):
""" we insert an additional column representing the label id as int"""
columnList = []
for p in range(0, len(inputCloud)):
label = inputCloud[p, 4]
if label == 'None':
columnList.append(0)
elif label == 'road':
columnList.append(1)
elif label == 'car':
columnList.append(2)
elif label == 'person':
columnList.append(3)
elif label == 'cyclist':
columnList.append(4)
newColumn = np.asarray(columnList)
    inputCloud = np.insert(inputCloud, 5, newColumn, axis=1)
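    # (added) the excerpt ends here; presumably the augmented cloud is
    # returned to the caller:
    return inputCloud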
import numpy as np
from .multichannel_iterator import MultiChannelIterator
from scipy.ndimage import gaussian_filter
def open_channel(dataset, channel_keyword, group_keyword=None, size=None):
iterator = MultiChannelIterator(dataset = dataset, channel_keywords=[channel_keyword], group_keyword=group_keyword, input_channels=list(np.arange(len(channel_keyword))) if isinstance(channel_keyword, (list, tuple)) else [0], output_channels=[], batch_size=1 if size is None else size, shuffle=False)
if size is None:
iterator.batch_size=len(iterator)
data = iterator[0]
iterator._close_datasetIO()
return data
def get_min_and_max(dataset, channel_keyword, group_keyword=None, batch_size=1):
iterator = MultiChannelIterator(dataset = dataset, channel_keywords=[channel_keyword], group_keyword=group_keyword, output_channels=[], batch_size=batch_size)
vmin = float('inf')
vmax = float('-inf')
for i in range(len(iterator)):
batch = iterator[i]
vmin = min(batch.min(), vmin)
vmax = max(batch.max(), vmax)
iterator._close_datasetIO()
return vmin, vmax
def get_histogram(dataset, channel_keyword, bins, bin_size=None, sum_to_one=False, group_keyword=None, batch_size=1, return_min_and_bin_size=False, smooth_scale = 0, smooth_scale_in_bin_unit=True):
iterator = MultiChannelIterator(dataset = dataset, channel_keywords=[channel_keyword], group_keyword=group_keyword, output_channels=[], batch_size=batch_size)
if bins is None:
assert bin_size is not None
vmin, vmax = get_min_and_max(dataset, channel_keyword, batch_size=batch_size)
n_bins = round( (vmax - vmin) / bin_size )
bin_size = (vmax - vmin) / n_bins
bins = np.linspace(vmin, vmax, num=n_bins+1)
if isinstance(bins, int):
vmin, vmax = get_min_and_max(dataset, channel_keyword, batch_size=batch_size)
bin_size = (vmax - vmin)/bins
bins = np.linspace(vmin, vmax, num=bins+1)
histogram = None
for i in range(len(iterator)):
batch = iterator[i]
        histo, _ = np.histogram(batch, bins)
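        if histogram is None:
            histogram = histo.astype(float)
        else:
            histogram += histo
    # (hedged sketch, added) the excerpt is cut off above; a plausible tail
    # closes the iterator and honors the keyword options, though the
    # original continuation may differ:
    iterator._close_datasetIO()
    if smooth_scale > 0:
        sigma = smooth_scale if smooth_scale_in_bin_unit else smooth_scale / (bins[1] - bins[0])
        histogram = gaussian_filter(histogram, sigma)
    if sum_to_one:
        histogram = histogram / histogram.sum()
    if return_min_and_bin_size:
        return histogram, bins[0], bins[1] - bins[0]
    return histogram, bins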
#!/usr/bin/env python
# <NAME>
# Dec 2017
# A set of functions for plotting dvmdostem ouputs.
import os
import glob
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import netCDF4 as nc
import collections
def get_last_n_eq(var, timeres='yearly', fileprefix='', n=10):
'''
  Work in progress for getting the last few years of the equilibrium stage for calibration.
'''
fname = os.path.join(fileprefix, '{}_{}_eq.nc'.format(var.upper(), timeres.lower()))
if not os.path.exists(fname):
raise RuntimeError("Can't find file: {}".format(fname))
with nc.Dataset(fname) as ds:
data = ds.variables[var.upper()][-n:]
info = list(zip(data.shape, list(ds.dimensions.keys())))
#info = [(name, dim.size) for name, dim in ds.dimensions.iteritems()]
return data, info
def sum_monthly_flux_to_yearly(data):
'''
Expects `data` to be at least a 1D array, with the first axis being time.
  Also assumes that the time axis starts and stops on Jan 1 and Dec 31. In
  other words, if you had 2 years of monthly data that ran from Aug 1 through
  July 31, this function would NOT work!
'''
if (data.shape[0] % 12) != 0:
raise RuntimeError('data size for dimension 0 (time) must be an even multiple of 12')
newshape = [data.shape[0] // 12]
for i in data.shape[1:]:
newshape.append(i)
yearly_data = np.zeros((newshape)) * np.nan
for i in range(0, newshape[0]):
yr_start = (i * 12)
yr_end = (i * 12) + 12
yearly_data[i] = np.sum(data[yr_start:yr_end], axis=0)
return yearly_data
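# Quick illustrative check (added; not part of the original module): 24
# months of ones summed to yearly totals gives two years of twelves.
def _demo_sum_monthly_flux_to_yearly():
  data = np.ones((24, 2, 2))
  yearly = sum_monthly_flux_to_yearly(data)
  assert yearly.shape == (2, 2, 2) and np.all(yearly == 12.0)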
def sum_across_compartments(data):
'''
Expects `data` to be a 5D numpy array with dimensions
`(time, pftpart, pft, y, x)`.
Returns a 4D array that has been summed over the `pftpart` dimension,
effectively creating a new array with totals for a PFT instead of having
values split by compartment.
'''
if len(data.shape) != 5:
raise RuntimeError('data input parameter must have 5 dimensions')
return np.sum(data, axis=1)
def sum_across_pfts(data):
'''
Expects `data` to be a 4D numpy array with dimensions
`(time, pft, y, x)`.
Returns a 3D array that has been summed over the `pft` dimension, effectively
creating a new array with totals for an entire gridcell/pixel instead of
having the totals broken out by PFT.
'''
if len(data.shape) != 4:
raise RuntimeError('data input parameter must have 4 dimensions')
return np.sum(data, axis=1)
def sum_across_layers(data):
'''
Expects `data` to be a 4D numpy array with dimensions
`(time, layers, y, x)`.
Returns a 3D array that has been summed over the `layer` dimension,
effectively creating a new array with totals for an entire gridcell/pixel
instead of having the totals broken out by layer.
'''
if len(data.shape) != 4:
raise RuntimeError('data input parameter must have 4 dimensions')
return np.sum(data, axis=1)
def average_monthly_pool_to_yearly(data):
'''
Expects `data` to be a 3D, 4D, or 5D numpy MASKED array with dimensions
`(time, layers, y, x)`, `(time, pft, y, x)` or `(time, pftpart, pft, y, x)`
and the size of the time dimension to be an even multiple of 12. The calling
client is expected to set up the mask correctly, i.e. if data is read from a
netcdf file, then using the _FillValue for the mask.
The function will compute the average of the input array along the time
dimension in strides of 12.
Returns a 3D, 4D, or 5D numpy array with dimensions e.g. `(time, layers, y, x)`,
(same as input), but the length of the returned time dimension will be
1/12th of the length of the input array.
Note: this function contains a loop and could probably be vectorized somehow
with some fancy numpy indexing.
Examples:
Load a monthly file with soil layer data and convert it to yearly.
>>> import numpy as np
>>> import netCDF4 as nc
>>> import scripts.output_utils as ou
>>>
>>> soc = nc.Dataset('all-merged/SOC_monthly_tr.nc')
>>> a = np.ma.masked_values(soc.variables['SOC'][:], soc.variables['SOC']._FillValue)
>>> a = np.ma.masked_values(a, -99999)
>>>
>>> b = average_monthly_pool_to_yearly(a)
>>> print a.shape, b.shape
(1308, 22, 10, 10) (109, 22, 10, 10)
'''
if len(data.shape) != 4:
raise RuntimeError('data input parameter must have 4 dimensions.')
if data.shape[0] % 12 > 0:
raise RuntimeError('data input parameter first dimension (time) must be evenly divisible by 12')
if not isinstance(data, np.ma.core.MaskedArray):
raise RuntimeError('data input parameter must be a numpy masked array!')
original_dims = list(data.shape)
new_time = original_dims[0] // 12
new_dims = original_dims
new_dims[0] = new_time
new_dims = [round(x) for x in new_dims]
output = np.ones(new_dims) * np.nan
for i in range(0, new_time):
yr_start = i * 12
yr_end = i*12 + 12
output[i] = data[yr_start:yr_end].mean(axis=0)
return output
def stitch_stages(var, timestep, stages, fileprefix=''):
'''
  Expects `var` to be one of the dvmdostem output variable names. `timestep` must
  be one of "yearly" or "monthly", and stages is an (ordered) list containing
one or more of "pr","eq","sp","tr","sc". `fileprefix` is an optional path
that will be pre-pended to the filenames for opening files in a different
directory.
Outputs from dvmdostem assume one variable per file and the following
file-naming pattern:
`var`_`timestep`_`stage`.nc
This function makes the following additional assumptions about the files:
- All files for different timesteps have the same dimensions
- The file's variable name is the same as the variable name in the filename.
- There is a units attribute?
Returns a tuple (`data`, `units`), where `data` is a multi-dimensional numpy
array that is the concatenation of the input arrays along the time axis and
`units` is the unit string that is found in the input netcdf files.
'''
expected_file_names = ["{}_{}_{}.nc".format(var, timestep, stg) for stg in stages]
expected_file_names = [os.path.join(fileprefix, i) for i in expected_file_names]
full_ds = np.array([])
units_str = ''
for i, exp_file in enumerate(expected_file_names):
print("Trying to open: ", exp_file)
with nc.Dataset(exp_file, 'r') as f:
#print f.variables[var].units
if i == 0:
full_ds = f.variables[var][:]
units_str = f.variables[var].units
else:
full_ds = np.concatenate( (full_ds, f.variables[var][:]), axis=0 )
if f.variables[var].units != units_str:
raise RuntimeError("Something is wrong with your input files! Units don't match!")
return (full_ds, units_str)
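# Illustrative call (added; not part of the original module) -- stitch the
# transient and scenario stages of yearly GPP into one series; the file
# prefix is hypothetical:
#   data, units = stitch_stages('GPP', 'yearly', ['tr', 'sc'], fileprefix='output/')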
def check_files(fnames):
'''
A work in progress for verifying assumptions about input files, `fnames`.
Prints messages to stdout.
Returns `None`.
'''
def get_dims(afile):
'''
Attempts to open `afile` as a netCDF file and read dimensions.
Prints message for RuntimeErrors resulting from opening and reading file.
Returns a list of dimensions in the file.
'''
try:
with nc.Dataset(afile, 'r') as f:
dims = f.variables[var].dimensions
except RuntimeError as e:
print("RuntimeError! Problem opening/reading: {} message: {}".format(afile, e.message))
dims = None
return dims
dims_for_each_file = [get_dims(f) for f in fnames]
if len(set(dims_for_each_file)) > 1:
print("Warning! Not all files have the same dims!")
if set(dims_for_each_file).pop() is None:
print("Warning! At least one file doesn't even have dimensions!")
if len(set(dims_for_each_file).pop()) > 0:
first_item = set(dims_for_each_file).pop()
if first_item[0].lower() != 'time':
print("Warning! It appears that the first dimension is not time!")
else:
print("Warning! No dimensions, can't check for time as first dimension!")
def mask_by_cmt(data, cmtnum, vegmap_filepath):
'''
Expects `data` to be at least a 2D array with the last two dimensions being
(y, x). `cmtnum` is the community type number that will remain unmasked
in the final data. `vegmap_filepath` should be the path to dvmdostem input
vegetation map file, which has dimensions (y,x) and an integer specifying
the CMT number for each pixel, under the variable 'veg_class'.
Returns a numpy masked array with the same dimensions as `data`. In the
  returned array, data for pixels in `vegmap_filepath` equal to `cmtnum` will be
  unmasked.
Example:
# An nd veg mask
# Assumed that the shape of data is either
# 3D (time, y, x),
# 4D (time, pft, y, x) or (time, layer, y, x) or
# 5D (time, pftpart, pft, y, x),
# and that in any case, the last two dimensions are y and x
# For example:
In [142]: ba = np.random.randint(0,100,(2,3,4,5,5))
In [143]: np.broadcast_to(np.random.randint(0,2,(5,5)), ba.shape).shape
Out[143]: (2, 3, 4, 5, 5)
'''
vegmap = nc.Dataset(vegmap_filepath, 'r')
vegmask = np.ma.masked_not_equal(vegmap.variables['veg_class'][:], cmtnum)
vmnd_mask = np.broadcast_to(vegmask.mask, data.shape)
# Full data, masked by veg type
vmnd_all = np.ma.masked_array(data, vmnd_mask)
return vmnd_all
def mask_by_failed_run_status(data, run_status_filepath="run_status.nc"):
'''
Masks out any data for which the run status is < 0 in the `run_status_filepath`.
`data`: (numpy.ndarray) must have at least 2 dimensions (y, x) and they must
be the last dimensions.
  `run_status_filepath`: (str) path to a dvmdostem run_status.nc file that has
dimensions (y,x) and single variable run_status(y,x) that has positive values
for successfully run pixels and negative values for failed pixels.
Returns a numpy masked array the same shape as `data` with all the data for
failed pixels masked out.
Example: see mask_by_cmt(...)
'''
runstat = nc.Dataset(run_status_filepath)
runstatmask = np.ma.masked_less(runstat.variables['run_status'][:], 0)
rsnd_mask = np.broadcast_to(runstatmask.mask, data.shape)
rsnd_all = np.ma.masked_array(data, rsnd_mask)
return rsnd_all
def plot_comp_sst():
ROWS=4; COLS=4
gs = gridspec.GridSpec(ROWS, COLS)
for i, cmt in enumerate([4,5,6,7]):
ax = plt.subplot(gs[i,:])
plot_spatial_summary_timeseries('VEGC', 'yearly', cmt, 'tr sc'.split(' '), "vegetation.nc", "run_status.nc", ax=ax)
plt.tight_layout()
plt.show(block=True)
def plot_basic_timeseries(vars2plot, spatial_y, spatial_x, time_resolution, stages, folder):
'''
Make a basic timeseries plot, one subplot per variable.
  Not sure yet how this should handle summarizing over layers, pfts, etc.
  Assembles the expected file name for data based on args, e.g. GPP_monthly_eq.nc.
  Raises FileNotFoundError if a file does not exist.
Parameters
----------
vars2plot : list of variable names
  spatial_y : pixel y coordinate to plot, e.g. 0
  spatial_x : pixel x coordinate to plot, e.g. 0
time_resolution : str, 'yearly', 'monthly', or 'daily'
stages: list of stages to plot e.g. ['eq', 'sp', 'tr', 'sc']
folder: str, path to directory with output data files
'''
ROWS = len(vars2plot)
COLS = 1
gs = gridspec.GridSpec(ROWS, COLS)
for i, var in enumerate(vars2plot):
ax = plt.subplot(gs[i,:])
data, units = stitch_stages(var, time_resolution, stages, folder)
print(data.shape)
ax.plot(data[:,spatial_y, spatial_x], label=var)
ax.set_ylabel = units
plt.savefig("plot_basic_timeseries.png")
plt.show()
def plot_spatial_summary_timeseries(var, timestep, cmtnum, stages, ref_veg_map, ref_run_status, ax=None):
'''
Plots a single line with min/max shading representing the `var` averaged over
the spatial dimension, considering only pixels for `cmtnum`. Stitches together
data for the specified `stages`.
`var`: (string) must be one of the dvmdostem output variables (upper case).
`timestep`: (string) must be one of "monthly" or "yearly".
`cmtnum`: (int) must be a number found in the veg map, see `mask_by_cmt(..)`.
`stages`: (list) must contain one or more of "pr","eq","sp","tr","sc".
`ref_veg_map`: (str) must be a file path to a dvmdostem vegetation input map
with dimensions (y, x) and a single variable veg_class(y,x) with a number
representing the community type for that pixel.
`ref_run_status`: (str) must be a file path to a dvmdostem run_status.nc map
with dimensions (y,x) and a single variable run_status(y,x) with a number
indicating how the pixel completed its run.
`ax`: (matplotlib.axes._subplots.AxesSubplot instance) will plot line(s) on
this axes instance. If ax is None, a new figure will be created (and shown)
for the plot.
Attempts to find the requisite files for `var`, `timestep` and `stages`.
Plots a timeseries of variable `var` after averaging over the spatial
dimensions. If the data found in the input files is higher dimensionality
than (time, y, x), for example (time, pft, y, x), then the data is
summed across PFTs before plotting.
Returns `None`
'''
data, units = stitch_stages(var, timestep, stages)
print("data size:", data.size)
data = mask_by_cmt(data, cmtnum, ref_veg_map)
print("data count after masking cmt:", data.count())
data = mask_by_failed_run_status(data, ref_run_status)
print("data count after masking run status:", data.count())
if len(data.shape) == 5: # assume (time, pftpart, pft, y, x)
data = sum_across_compartments(data)
if len(data.shape) == 4: # assume (time, pft, y, x) or (time, layer, y, x)
data = sum_across_pfts(data) # alternatively, use sum_across_layers(..)
if len(data.shape) == 3: # assume (time, y, x)
pass # all set...
workhorse_spatial_summary_plot(data, cmtnum, units, var, stages, ax=ax)
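# The sum_across_compartments(..) and sum_across_pfts(..) helpers used above
# are defined elsewhere in this script; the sketch below only illustrates the
# assumed shape contract (collapse axis 1 of a masked array) and is not the
# real implementation.
def sum_across_pfts_sketch(data):
    '''e.g. (time, pft, y, x) -> (time, y, x) by summing over the pft axis.'''
    return np.ma.sum(data, axis=1)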
def workhorse_spatial_summary_plot(data, cmtnum, yunits, varname, stages, ax=None):
'''
Worker function, plots a line for average along time axis (axis 0),
with shading for min and max.
`data`: (numpy.ndarray) must have dimensions (time, y, x).
`cmtnum`: (int) used for the plot title.
`varname`: (str) used for the plot title
`stages`: (list) used for the plot title, must contain one or
more of "pr","eq","sp","tr","sc".
`ax`: (matplotlib.axes._subplots.AxesSubplot instance) will plot line(s) on
this axes instance. If ax is None, a new figure will be created (and shown)
for the plot.
Returns `None`
'''
if ax is not None:
print("Plotting on existing ax instance...")
ax.plot(np.ma.average(data, axis=(1,2)), linewidth=0.5, label="CMT {}".format(cmtnum))
ax.fill_between(
np.arange(0, len(data)),
np.ma.min(data, axis=(1,2)),
np.ma.max(data, axis=(1,2)),
color='gray', alpha=0.25
)
ax.set_ylabel(yunits)
ax.set_title("{} for CMT {} averaged spatially for stages {}".format(varname, cmtnum, stages))
#plt.show(block=True)
else:
print("Plotting on new ax, figure...")
plt.plot(np.ma.average(data, axis=(1,2)), linewidth=0.5, label="CMT {}".format(cmtnum))
plt.fill_between(
np.arange(0, len(data)),
np.ma.min(data, axis=(1,2)),
np.ma.max(data, axis=(1,2)),
color='gray', alpha=0.25
)
plt.ylabel(yunits)
plt.title("{} for CMT {} averaged spatially for stages {}".format(varname, cmtnum, stages))
plt.show(block=True)
def plot_inputs(cmtnum, hist_fname, proj_fname, ref_veg_map):
'''
Plots the historic and projected climate inputs, averaging over the spatial
dimensions and with shading for min/max.
`cmtnum`: (int) CMT to work with, must be in the veg map, see mask_by_cmt(..).
`hist_fname`: (str) path to a dvmdostem historic input file.
`proj_fname`: (str) path to a dvmdostem projected input file.
The historic and projected input files are assumed to have the variables
tair, precip, vapor_press, and nirr, all with dimensions (time, y, x).
The files are assumed to be monthly resolution.
Returns `None`.
'''
with nc.Dataset(hist_fname) as f:
htair = f.variables['tair'][:]
hprecip = f.variables['precip'][:]
hvp = f.variables['vapor_press'][:]
hnirr = f.variables['nirr'][:]
with nc.Dataset(proj_fname) as f:
ptair = f.variables['tair'][:]
pprecip = f.variables['precip'][:]
pvp = f.variables['vapor_press'][:]
pnirr = f.variables['nirr'][:]
tair = np.concatenate((htair, ptair), axis=0)
precip = np.concatenate((hprecip, pprecip), axis=0)
vp = np.concatenate((hvp, pvp), axis=0)
nirr = np.concatenate((hnirr, pnirr), axis=0)
tair = mask_by_cmt(tair, cmtnum, ref_veg_map)
precip = mask_by_cmt(precip, cmtnum, ref_veg_map)
vp = mask_by_cmt(vp, cmtnum, ref_veg_map)
nirr = mask_by_cmt(nirr, cmtnum, ref_veg_map)
#fig, (tair_ax, precip_ax, vp_ax, nirr_ax) = plt.subplots()
tair_ax = plt.subplot(411)
precip_ax = plt.subplot(412, sharex=tair_ax)
vp_ax = plt.subplot(413, sharex=tair_ax)
nirr_ax = plt.subplot(414, sharex=tair_ax)
for pax, data, vname in zip([tair_ax, precip_ax, vp_ax, nirr_ax], [tair, precip, vp, nirr], ['tair','precip','vapor_press','nirr']):
pax.plot(np.ma.average(data, axis=(1,2)), linewidth=0.5, label="{}".format(vname))
pax.fill_between(
np.arange(0, len(data)),
np.ma.min(data, axis=(1,2)),
np.ma.max(data, axis=(1,2)),
color='gray', alpha=0.25
)
# add a line marking the transition from historic to projected
pax.vlines(htair.shape[0], *pax.yaxis.get_view_interval(), color='red')
pax.set_title("{} averaged spatially for historic and projected masked to cmt{}".format(vname, cmtnum))
plt.tight_layout()
plt.show(block=True)
def boxplot_monthlies(var, stages, cmtnum, ref_veg_map, ref_run_status, facecolor='blue'):
'''
Makes a boxplot showing distribution of values for `var` for each month,
averaged over spatial dimensions, and only considering `cmtnum`. If multiple
stages are specified, the stages will be stitched together along the time
dimension before the distributions are calculated.
`var`: (str) one of the dvmdostem output variables.
`stages`: (list) must contain one or more of "pr","eq","sp","tr","sc".
`cmtnum`: (int) which CMT to work with.
`ref_veg_map`: (str) path to a vegetation map to use for masking cmts
`ref_run_status`: (str) path to run status map to use for masking failed cells
`facecolor`: (str) color to use for the box.
Returns `None`
'''
data, units = stitch_stages(var, 'monthly', stages)
print("data size:", data.size)
data = mask_by_cmt(data, cmtnum, ref_veg_map)
print("data count after masking cmt:", data.count())
data = mask_by_failed_run_status(data, ref_run_status)
print("data count after masking run status:", data.count())
# list of months
months = "Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec".split(' ')
# ordered dict mapping month name -> all values for that calendar month,
# picked out of the monthly timeseries with a stride of 12
monthstr2data = collections.OrderedDict()
for i, m in enumerate(months):
    monthstr2data[m] = data[i::12,:,:]
data2 = [np.ma.average(i, axis=(1,2)) for i in list(monthstr2data.values())]
bp = plt.boxplot(
data2,
labels=list(monthstr2data.keys()),
#notch=True,
whis='range', # force whiskers to min/max range instead of quartiles
showfliers=False,
patch_artist=True,
boxprops=dict(facecolor=facecolor, alpha=.25),
whiskerprops=dict(color=facecolor),
capprops=dict(color=facecolor)
)
plt.ylabel(units)
plt.title("{} for CMT {}, averaged spatially ".format(var, cmtnum))
plt.show(block=True)
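# Tiny self-contained illustration of the data[i::12] stride used above:
# starting at month index i and stepping by 12 picks every occurrence of that
# calendar month out of a monthly timeseries.
def example_monthly_stride():
    monthly = np.arange(24)      # two years of fake monthly values
    januaries = monthly[0::12]   # -> array([ 0, 12])
    julys = monthly[6::12]       # -> array([ 6, 18])
    return januaries, julys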
def boxplot_by_pft(var, timestep, cmtnum, stages, ref_veg_map, ref_run_status):
'''
Work in progress...
'''
data, units = stitch_stages(var, timestep, stages)
print("data size:", data.size)
print(data.shape)
d2 = data
# d2 = sum_across_compartments(data)
# print "data size after summing compartments:", d2.size
d3 = mask_by_cmt(d2, cmtnum, ref_veg_map)
print("data size after masking cmt:", d3.count())
d3 = mask_by_failed_run_status(d3, ref_run_status)
print("data count after masking run status:", d3.count())
pft0avg = np.ma.average(d3, axis=(2,3))
#plt.plot(pft0avg) # Line plot
plt.boxplot(
pft0avg,
labels = ["PFT {}".format(i) for i in range(0, 10)],
whis='range',
showfliers=False,
patch_artist=True,
boxprops=dict(color='blue', alpha=0.25),
whiskerprops=dict(color='red'),
capprops=dict(color='blue'),
)
plt.ylabel(units)
plt.show(block=True)
def plot_soil_layers():
'''
WORK IN PROGRESS!
Attempts to display an intuitive representation of the soil column using
a horizontal bar chart.
- bar width is set by the value of the variable under investigation
- bar color is set by the LAYERTYPE
- bar height (thickness?) is controlled by the LAYERDZ
The y axis is depth (cumulative sum of LAYERDZ)
'''
with nc.Dataset("all-merged/SOC_monthly_tr.nc") as f:
soc = np.ma.masked_values(f.variables['SOC'][:], f.variables['SOC']._FillValue)
soc = np.ma.masked_values(soc, -99999)
soc = average_monthly_pool_to_yearly(soc)
with nc.Dataset("all-merged/LAYERDZ_yearly_tr.nc") as dzf:
dz = dzf.variables['LAYERDZ'][:]
with nc.Dataset("all-merged/LAYERTYPE_yearly_tr.nc") as ltf:
lt = ltf.variables['LAYERTYPE'][:]
Y = 0
X = 0
time = 78
def cmapper(x):
# moss shallow deep mineral undefined
c = ['green', 'red', 'black', 'gray', 'y']
if isinstance(x, np.ma.core.MaskedConstant):
return c[-1]
else:
return c[int(x)]
colors = list(map(cmapper, lt[time,:,Y,X]))
bottoms = np.cumsum(dz[time,:,Y,X]) * -1 # <-- reverses y axis!
widths = soc[time,:,Y,X]
heights = dz[time,:,Y,X]
plt.barh(bottoms, widths, heights, color=colors)
def plot_fronts(args):
'''
Makes a timeseries plot of all the fronts that are output. The X axis is time,
the Y axis is depth from surface (0 at the top of the plot). Each front will
have a line on the plot.
Blue dots are used for FREEZING fronts (frozen above, warm below, values > 0).
Orange is used for THAWING fronts (warm above, frozen below, values < 0).
# NOTE:
# In the model, there are two places that fronts info is stored:
# a pair of deques and a pair of arrays. The arrays are set to have a max
# size of 10 (MAX_FRONTS or something like that). As the model runs, the
# values are written periodically from the deques into the arrays. The
# arrays are the structures that are output to the netcdf files. Our output
# netcdf files and output_spec files are not set up to deal with a "fronts"
# dimension. So for now, since we know there will only be
# 10 fronts, we'll store the stuff in files with a layer dimension, using the
# first 10 slots. After a little testing it looks like there are rarely more
# than 2 fronts, so this setup is not space efficient.
'''
time = args.timestep
Y, X = args.yx
od = args.outfolder
timeres = (args.timeres).lower()
stage = (args.stage).lower()
ftype, ftype_units = pull_data_wrapper(args, variable='FRONTSTYPE', required_dims=['time','layer','y','x'])
fdepth, fdepth_units = pull_data_wrapper(args, variable='FRONTSDEPTH', required_dims=['time','layer','y','x'])
if fdepth_units == '':
    print("WARNING! Missing depth units! Assumed to be meters.")
    fdepth_units = 'm'
if ftype_units == '':
    print("WARNING! Missing front type units! Assumed to be categorical.")
    ftype_units = 'categorical'
# Setup the plotting
ROWS=1; COLS=1
gs = gridspec.GridSpec(ROWS, COLS)
fig = plt.figure()
ax0 = plt.subplot(gs[:])
ax0.set_ylabel("Depth ({})".format(fdepth_units))
ax0.set_xlabel("Time")
ax0.set_title("{}".format(od))
x = np.arange(0, fdepth.shape[0])
for fnt_idx in range(0,10):
front_thaw = ax0.scatter(x, np.ma.masked_where(ftype[:,fnt_idx,Y,X] > 0, fdepth[:,fnt_idx,Y,X]), color='orange', marker='o')
front_thaw_line = ax0.plot(np.ma.masked_where(ftype[:,fnt_idx,Y,X] > 0, fdepth[:,fnt_idx,Y,X]), label='thaw front {}'.format(fnt_idx), color='orange', alpha=0.5)
front_freeze = ax0.scatter(x ,np.ma.masked_where(ftype[:,fnt_idx,Y,X] < 0, fdepth[:,fnt_idx,Y,X]), color='blue', marker='o')
front_freeze_line = ax0.plot(np.ma.masked_where(ftype[:,fnt_idx,Y,X] < 0, fdepth[:,fnt_idx,Y,X]), label='freeze front {}'.format(fnt_idx), color='blue', alpha=0.5)
if args.show_layers:
layerdepth, layerdepth_units = pull_data_wrapper(args, variable="LAYERDEPTH", required_dims=['time','layer','y','x'])
layer_lines = []
for lidx in range(0,layerdepth.shape[1]):
layerline = ax0.plot(layerdepth[:,lidx,Y,X], color='gray', alpha=0.5, linewidth=0.5, marker='o', markersize=.75)
layer_lines.append(layerline)
if args.layer_colors:
ltype, ltype_units = pull_data_wrapper(args, "LAYERTYPE")
for il, l in enumerate(layer_lines):
if il == 0:
pass
else:
currl = l[0]
prevl = layer_lines[il-1][0]
# Make sure to grab the previous layer (il-1) for the layer type condition!
ax0.fill_between(x, currl.get_ydata(), prevl.get_ydata(), ltype[:,il-1,Y,X] == 0, color='xkcd:green', alpha=.5)
ax0.fill_between(x, currl.get_ydata(), prevl.get_ydata(), ltype[:,il-1,Y,X] == 1, color='xkcd:sand', alpha=.5)
ax0.fill_between(x, currl.get_ydata(), prevl.get_ydata(), ltype[:,il-1,Y,X] == 2, color='xkcd:coffee', alpha=.5)
ax0.fill_between(x, currl.get_ydata(), prevl.get_ydata(), ltype[:,il-1,Y,X] == 3, color='xkcd:silver', alpha=.5)
# This is super cluttered in the default view if there are many layers or if
# the time axis is long, but looks good when you zoom in.
ax0.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(12))
ax0.xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(6))
ax0.invert_yaxis()
plt.show(block=True)
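# Minimal illustration of the np.ma.masked_where(..) pattern used in
# plot_fronts(..): one signed series is split into two overlays, so each line
# only shows points of one sign. Synthetic values for demonstration only.
def example_front_sign_split():
    ftype = np.array([-1, 1, -1, 1])
    depth = np.array([0.1, 0.4, 0.2, 0.5])
    thaw = np.ma.masked_where(ftype > 0, depth)    # keeps only thawing entries
    freeze = np.ma.masked_where(ftype < 0, depth)  # keeps only freezing entries
    return thaw, freeze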
def pull_data_wrapper(args, variable=None, required_dims=None):
'''
Extracts data from a netcdf file.
`args` must be an object (e.g. an argparse.Namespace) with attributes for
outfolder, timeres, and stage.
`variable` must be a string with the variable name that is expected to be in
the netcdf file. The file is expected to be named like this:
"VARIABLE_TIMERES_STAGE.nc" and is expected to be present in
args.outfolder.
If required_dims is passed, then the dimensions of the variable to extract
are checked against the list and a RuntimeError is raised if there is a
problem.
Returns a tuple (data, units), where data is a numpy array or masked array
and units is a string extracted from the attributes of the netcdf file.
'''
od = args.outfolder
timeres = (args.timeres).lower()
stage = (args.stage).lower()
def pull_data(the_var, required_dims):
'''Pulls data out of an nc file'''
fglob = os.path.join(od, "{}_{}_{}.nc".format(the_var, timeres, stage))
the_file = glob.glob(fglob)
if len(the_file) < 1:
raise RuntimeError("Can't find file for variable '{}' here: {}".format(the_var, fglob))
if len(the_file) > 1:
raise RuntimeError("Appears to be more than one file matching glob?: {}".format(fglob))
the_file = the_file[0]
print("Pulling data from ", the_file)
with nc.Dataset(the_file, 'r') as ds:
if required_dims is not None:
for rd in required_dims:
if rd not in list(ds.dimensions.keys()):
raise RuntimeError("'{}' is a required dimension for this operation. File: {}".format(rd, the_file))
data = ds.variables[the_var][:]
units = ds.variables[the_var].units
return data, units
data, units = pull_data(variable, required_dims)
return data, units
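# Illustrative call (assumed folder and settings): a types.SimpleNamespace
# stands in for the argparse Namespace that the command line front end would
# normally supply to pull_data_wrapper(..).
def example_pull_data():
    from types import SimpleNamespace
    args = SimpleNamespace(outfolder='output', timeres='yearly', stage='tr')
    return pull_data_wrapper(args, variable='TLAYER',
                             required_dims=['time', 'layer', 'y', 'x'])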
def plot_soil_layers2(args):
'''
Makes plots of soil profile variables. Generates one plot/axis for each
variable specified. Y axis is the depth, X axis is the value of the variable.
Plots include horizontal lines marking layer boundaries.
Calling code must supply `args`, which must be an object (e.g. an
argparse.Namespace) with the following attributes set:
`outfolder`: (string) a path to a folder containing dvmdostem output files.
`timestep`: (int) index along time axis to plot
`yx`: (tuple) the pixel coordinates to source for the plot(s)
`timeres`: (string) either 'monthly', 'yearly', or 'daily' (daily is untested)
`stage`: (string) the run stage to plot from
`vars`: a list of variable names to plot, e.g. ['TLAYER', 'SOC', 'ORGN']
The function will look in `outfolder` for the appropriate dvmdostem output
files based on variable name, stage, and time resolution.
NOTE: The LAYERDZ and LAYERDEPTH files must be present in `outfolder` for
the specified `stage` and `timeres`!
'''
od = args.outfolder
time = args.timestep
Y, X = args.yx
timeres = (args.timeres).lower()
stage = (args.stage).lower()
# Need to specify units in output_spec files!!
depth, depthunits = pull_data_wrapper(args, 'LAYERDEPTH', required_dims=['time','layer','y','x'])
dz, dzunits = pull_data_wrapper(args, 'LAYERDZ', required_dims=['time','layer','y','x',])
if depthunits == '':
print("WARNING! Missing depth units! Assumed to be meters.")
depthunits = 'm'
if dzunits == '':
print("WARNING! Missing dz units! Assumed to be meters.")
dzunits = 'm'
if dzunits != depthunits:
print("WARNING! depthunits ({}) and dzunits ({}) are not the same!".format(depthunits, dzunits))
# Setup the plotting
ROWS=1; COLS=len([v.upper() for v in args.vars])
gs = gridspec.GridSpec(ROWS, COLS)
fig = plt.figure()
ax0 = plt.subplot(gs[:,0])
ax0.set_ylabel("Depth ({})".format(depthunits))
for i, v in enumerate([v.upper() for v in args.vars]):
if i == 0:
ax0.set_title(v)
ax0.invert_yaxis()
else:
ax = plt.subplot(gs[:,i], sharey=ax0)
ax.set_title(v)
for ax in fig.axes:
vardata, units = pull_data_wrapper(args, ax.get_title())
# Line plot, offset so markers are at the midpoint of the layer.
ax.plot(
vardata[time,:,Y,X],
depth[time,:,Y,X] + (0.5 * dz[time,:,Y,X]),
#color='red',
marker='o',
markeredgecolor='gray',
#markerfacecolor='red',
alpha=0.85,
)
if ax.get_title() == 'TLAYER':
ymin, ymax = ax.yaxis.get_view_interval()
ax.vlines(0, ymin, ymax, linestyles='solid', color='red')
# First attempt was to use horizontal bars to display variables that
# represent mass or volume (e.g. SOC). This worked for versions of
# matplotlib < 2.x, but in the more recent versions there is some
# issue and the y scale gets really messed up when plotting bars.
#
# if ax.get_title().upper() in ['SOC','VWC']:
# ''' For volume/mass stuff, use bars'''
# ax.barh(
# depth[time,:,Y,X], # bottom
# vardata[time,:,Y,X], # width
# dz[time,:,Y,X], # height
# )
# else:
# '''Line plot, offset so markers are at the midpoint of layer.'''
# ax.plot(
# vardata[time,:,Y,X],
# depth[time,:,Y,X] + (0.5 * dz[time,:,Y,X]),
# marker='o',
# )
# Label the X axis
ax.set_xlabel(units)
# Put in the layer markers.
xmin, xmax = ax.xaxis.get_view_interval()
ax.hlines(
depth[time,:,Y,X], # y positions
xmin, xmax, # x min and max
linestyles='dashed', color='orange'
)
ax.grid(False, which='both', axis='both')
# Turn off y axis labels for all but the left one (first axes instance)
for ax in fig.axes[1:]:
ax.yaxis.set_visible(False)
fig.suptitle("Soil Profile stage: {} {}, timestep: {}".format(stage, timeres, time))
plt.show(block=True)
def print_soil_table(outdir, stage, timeres, Y, X, timestep):
'''
Prints a table to stdout with all the soil info.
Looks in the `outdir` for any .nc file with 'layer' in the dimension list,
and `stage` and `timeres` in the name, e.g. "SOC_monthly_tr.nc". Each
appropriate file found will get a column in the printed table. The printed
table will be for the pixel specified by `Y` and `X` and for the specified
`timestep`
Prints a very wide table if there are many by-layer outputs available. A neat
addition to this function would be a better way to control the width.
'''
def get_var_name(fpath):
'''Extract variable name from full path'''
return os.path.basename(fpath).split("_")[0]
allncfiles = glob.glob(os.path.join(outdir, "*_{}_{}.nc".format(timeres, stage)))
soilfiles = []
soillayerdimlengths = []
for f in allncfiles:
with nc.Dataset(f) as ds:
if 'layer' in ds.dimensions:
try:
numlayers = ds.dimensions['layer'].size # attribute only available in netCDF4 > 1.2.2
except AttributeError as e:
numlayers = len(ds.dimensions['layer'])
soilfiles.append(f)
soillayerdimlengths.append(numlayers)
soillayerdimlengths = set(soillayerdimlengths)
if len(soillayerdimlengths) > 1:
raise RuntimeError("Not all files/variables have the same lenght of layer dimensions")
header_fmt = "{:>15s} " * len(soilfiles)
row_fmt = "{:>15.3f} " * len(soilfiles)
varlist = [get_var_name(f) for f in soilfiles]
print("---- Soil Profile ----")
print(" output directory: {}".format(outdir))
print(" {} files stage: {} pixel(y,x): ({},{}) timestep: {}".format(timeres.upper(), stage.upper(), Y, X, timestep))
print(header_fmt.format(*varlist))
# This is probably not very efficient.
for il in range(0, soillayerdimlengths.pop()):
data = []
for v, f in zip(varlist, soilfiles):
with nc.Dataset(f) as ds:
value = ds.variables[v][timestep, il, Y, X]
if isinstance(value, str) or np.ma.is_masked(value):  # is_masked works in recent numpy versions
    data.append(np.nan)
else:
    data.append(value)
print(row_fmt.format(*data))
# https://github.com/pytorch/ignite/blob/master/examples/notebooks/FashionMNIST.ipynb
# https://www.kaggle.com/stuarthallows/using-xgboost-with-scikit-learn
import time
import sys, os
from sklearn.metrics import f1_score
idx = os.getcwd().index("trade")
PROJECT_HOME = os.getcwd()[:idx] + "trade"
sys.path.append(PROJECT_HOME)
import xgboost as xgb
from pytz import timezone
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from common.global_variables import *
import matplotlib.pyplot as plt
from common.utils import *
from web.db.database import Model, trade_db_session
import numpy as np
import glob
from common.logger import get_logger
from codes.upbit.upbit_api import Upbit
from codes.upbit.upbit_order_book_based_data import UpbitOrderBookBasedData
from common.utils import save_model
import torch.nn.modules.loss
import warnings
import gc
warnings.filterwarnings("ignore")
logger = get_logger("make_models")
def mkdir_models():
if not os.path.exists(os.path.join(PROJECT_HOME, LOCAL_MODEL_SOURCE)):
os.makedirs(os.path.join(PROJECT_HOME, LOCAL_MODEL_SOURCE))
if not os.path.exists(os.path.join(PROJECT_HOME, LOCAL_MODEL_SOURCE, 'graphs')):
os.makedirs(os.path.join(PROJECT_HOME, LOCAL_MODEL_SOURCE, 'graphs'))
if not os.path.exists(os.path.join(PROJECT_HOME, LOCAL_MODEL_SOURCE, 'SCALERS')):
os.makedirs(os.path.join(PROJECT_HOME, LOCAL_MODEL_SOURCE, 'SCALERS'))
def save_graph(coin_name, model_type, valid_loss_min, last_valid_accuracy, last_save_epoch, valid_size, one_count_rate,
avg_train_losses, train_accuracy_list, avg_valid_losses, valid_accuracy_list):
files = glob.glob(os.path.join(PROJECT_HOME, LOCAL_MODEL_SOURCE, model_type, coin_name + '*'))
for f in files:
os.remove(f)
plt.clf()
fig, ax_lst = plt.subplots(2, 2, figsize=(30, 10), gridspec_kw={'hspace': 0.35})
ax_lst[0][0].plot(range(len(avg_train_losses)), avg_train_losses)
ax_lst[0][0].set_title('AVG. TRAIN LOSSES', fontweight="bold", size=10)
ax_lst[0][1].plot(range(len(train_accuracy_list)), train_accuracy_list)
ax_lst[0][1].set_title('TRAIN ACCURACY CHANGE', fontweight="bold", size=10)
ax_lst[0][1].set_xlabel('EPISODES', size=10)
ax_lst[1][0].plot(range(len(avg_valid_losses)), avg_valid_losses)
ax_lst[1][0].set_title('AVG. VALIDATION LOSSES', fontweight="bold", size=10)
ax_lst[1][1].plot(range(len(valid_accuracy_list)), valid_accuracy_list)
ax_lst[1][1].set_title('VALIDATION ACCURACY CHANGE', fontweight="bold", size=10)
ax_lst[1][1].set_xlabel('EPISODES', size=10)
filename = os.path.join(PROJECT_HOME, LOCAL_MODEL_SOURCE, model_type, 'graphs', "{0}_{1}_{2:.2f}_{3:.2f}_{4}_{5:.2f}.png".format(
coin_name,
last_save_epoch,
valid_loss_min,
last_valid_accuracy,
valid_size,
one_count_rate
))
plt.savefig(filename)
plt.close('all')
def train(optimizer, model, criterion, train_losses, x_train_normalized, y_up_train):
model.train()
optimizer.zero_grad()
out = model.forward(x_train_normalized)
loss = criterion(out, y_up_train)
loss.backward()
optimizer.step()
train_losses.append(loss.item())
out = torch.sigmoid(out)
t = torch.tensor(0.5).to(DEVICE)
output_index = (out > t).float() * 1
return y_up_train.size(0), (output_index == y_up_train).sum().float()
def post_train_processing(train_losses, avg_train_losses, train_accuracy_list, correct, total_size):
train_loss = np.average(train_losses)
avg_train_losses.append(train_loss)
train_accuracy = 100 * correct / total_size
train_accuracy_list.append(train_accuracy)
return train_loss, train_accuracy
def validate(epoch, model, criterion, valid_losses, x_valid_normalized, y_up_valid):
model.eval()
out = model.forward(x_valid_normalized)
loss = criterion(out, y_up_valid)
valid_losses.append(loss.item())
out = torch.sigmoid(out)
t = torch.tensor(0.5).to(DEVICE)
output_index = (out > t).float() * 1
if VERBOSE: logger.info("Epoch {0} - Y_pred: {1}, Y_true: {2}".format(epoch, output_index, y_up_valid))
return y_up_valid.size(0), (output_index == y_up_valid).sum().float()
def post_validation_processing(valid_losses, avg_valid_losses, valid_accuracy_list, correct, total_size):
valid_loss = np.average(valid_losses)
avg_valid_losses.append(valid_loss)
valid_accuracy = 100 * correct / total_size
valid_accuracy_list.append(valid_accuracy)
return valid_loss, valid_accuracy
def make_model(model_type, x_normalized, y_up, global_total_size, X_train, y_train, X_test, y_test, one_rate):
if VERBOSE:
logger.info("[[[{0}]]]".format(model_type))
model = load_model(model_type=model_type)
if model:
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
best_model = model
best_f1_score = f1_score(y_test, y_pred)
else:
gc.collect()
coin_model_start_time = time.time()
X = x_normalized.reshape(global_total_size, -1)
y = y_up
if model_type == "GB":
param_grid = {
'learning_rate': [0.01, 0.05, 0.1],
'max_depth': np.linspace(1, 8, 4, endpoint=True),
}
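# The rest of make_model(..) is truncated above; the sketch below is a hedged
# example of how a param_grid like the "GB" one is typically consumed using
# the StratifiedKFold/GridSearchCV/GradientBoostingClassifier imports at the
# top of this file. The scoring metric and fold count are assumptions.
def example_grid_search(X, y, param_grid):
    cv = StratifiedKFold(n_splits=3, shuffle=True)
    search = GridSearchCV(GradientBoostingClassifier(), param_grid,
                          scoring='f1', cv=cv, n_jobs=-1)
    search.fit(X, y)
    return search.best_estimator_, search.best_score_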
import scipy.optimize as sco
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model
from keras import regularizers
from keras.models import load_model
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
from utils import portfolio
from hrp_routines import *
class HRPAgent:
def __init__(
self,
portfolio_size,
allow_short = True,
):
self.portfolio_size = portfolio_size
self.allow_short = allow_short
self.input_shape = (portfolio_size, portfolio_size, )
def act(self, returns):
corr = returns.corr()
cov = returns.cov()
optimal_weights = getHRP(cov, corr)
if self.allow_short:
optimal_weights /= sum(np.abs(optimal_weights))
else:
optimal_weights += np.abs(np.min(optimal_weights))
optimal_weights /= sum(optimal_weights)
return optimal_weights
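# Illustrative usage sketch with synthetic data: every agent in this module
# exposes the same act(returns) -> weights interface, where `returns` is a
# pandas DataFrame of asset returns. Sizes here are placeholders.
def example_hrp_usage():
    returns = pd.DataFrame(np.random.randn(252, 10) * 0.01)
    agent = HRPAgent(portfolio_size=10, allow_short=False)
    return agent.act(returns)  # non-negative weights summing to 1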
class AutoencoderAgent:
def __init__(
self,
portfolio_size,
allow_short = True,
encoding_dim = 25
):
self.portfolio_size = portfolio_size
self.allow_short = allow_short
self.encoding_dim = encoding_dim
def model(self):
input_img = Input(shape=(self.portfolio_size, ))
encoded = Dense(self.encoding_dim, activation='relu', kernel_regularizer=regularizers.l2(1e-6))(input_img)
decoded = Dense(self.portfolio_size, activation= 'linear', kernel_regularizer=regularizers.l2(1e-6))(encoded)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adam', loss='mse')
return autoencoder
def act(self, returns):
data = returns
autoencoder = self.model()
autoencoder.fit(data, data, shuffle=False, epochs=25, batch_size=32, verbose=False)
reconstruct = autoencoder.predict(data)
communal_information = []
for i in range(0, len(returns.columns)):
diff = np.linalg.norm((returns.iloc[:,i] - reconstruct[:,i])) # 2 norm difference
communal_information.append(float(diff))
optimal_weights = np.array(communal_information) / sum(communal_information)
if self.allow_short:
optimal_weights /= sum(np.abs(optimal_weights))
else:
optimal_weights += np.abs(np.min(optimal_weights))
optimal_weights /= sum(optimal_weights)
return optimal_weights
class SmoothingAgent:
def __init__(
self,
portfolio_size,
allow_short = True,
forecast_horizon = 252,
):
self.portfolio_size = portfolio_size
self.allow_short = allow_short
self.forecast_horizon = forecast_horizon
def act(self, timeseries):
optimal_weights = []
for asset in timeseries.columns:
ts = timeseries[asset]
fit1 = Holt(ts).fit()
forecast = fit1.forecast(self.forecast_horizon)
prediction = forecast.values[-1] - forecast.values[0]
optimal_weights.append(prediction)
optimal_weights = np.asarray(optimal_weights)
if self.allow_short:
    optimal_weights /= sum(np.abs(optimal_weights))
else:
    optimal_weights += np.abs(np.min(optimal_weights))
    optimal_weights /= sum(optimal_weights)
return optimal_weights
class PCAAgent:
def __init__(
self,
portfolio_size,
pc_id = 0,
pca_max = 10,
allow_short = False,
):
self.portfolio_size = portfolio_size
self.allow_short = allow_short
self.input_shape = (portfolio_size, portfolio_size, )
self.pc_id = pc_id
self.pc_max = pca_max
def act(self, returns):
C = self.pc_max
pca = PCA(C)
returns_pca = pca.fit_transform(returns)
pcs = pca.components_
pc1 = pcs[self.pc_id, :]
if self.allow_short:
optimal_weights = pc1 / sum(np.abs(pc1))
else:
optimal_weights = pc1 + np.abs(np.min(pc1))
optimal_weights /= sum(optimal_weights)
return optimal_weights
class MaxReturnsAgent:
def __init__(
self,
portfolio_size,
allow_short = False,
):
self.portfolio_size = portfolio_size
self.allow_short = allow_short
self.input_shape = (portfolio_size, portfolio_size, )
def act(self, returns):
def loss(weights):
return -portfolio(returns, weights)[0]
n_assets = len(returns.columns)
if self.allow_short:
bnds = tuple((-1.0, 1.0) for x in range(n_assets))
cons =({'type': 'eq', 'fun': lambda x : 1.0 - np.sum(np.abs(x))})
else:
bnds = tuple((0.0, 1.0) for x in range(n_assets))
cons =({'type': 'eq', 'fun': lambda x : 1.0 - np.sum(x)})
opt_S = sco.minimize(
loss,
n_assets * [1.0 / n_assets],
method = 'SLSQP', bounds = bnds,
constraints = cons)
optimal_weights = opt_S['x']
# sometimes optimization fails with constraints; needs to be fixed by hand
if self.allow_short:
optimal_weights /= sum(np.abs(optimal_weights))
else:
optimal_weights += np.abs(np.min(optimal_weights))
optimal_weights /= sum(optimal_weights)
return optimal_weights
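# The portfolio(..) helper imported from utils is not shown in this file; the
# sketch below is a hedged reading of the (return, volatility, sharpe) triple
# it must return, based on how the agents above use indices [0], [1] and [2].
# The 252-day annualization and zero risk-free rate are assumptions.
def portfolio_sketch(returns, weights):
    mean_ret = np.sum(returns.mean() * weights) * 252
    vol = np.sqrt(np.dot(weights, np.dot(returns.cov() * 252, weights)))
    return mean_ret, vol, mean_ret / vol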
class MinVarianceAgent:
def __init__(
self,
portfolio_size,
allow_short = False,
):
self.portfolio_size = portfolio_size
self.allow_short = allow_short
self.input_shape = (portfolio_size, portfolio_size, )
def act(self, returns):
def loss(weights):
return portfolio(returns, weights)[1]**2
n_assets = len(returns.columns)
if self.allow_short:
bnds = tuple((-1.0, 1.0) for x in range(n_assets))
cons =({'type': 'eq', 'fun': lambda x : 1.0 - np.sum(np.abs(x))})
else:
bnds = tuple((0.0, 1.0) for x in range(n_assets))
cons =({'type': 'eq', 'fun': lambda x : 1.0 - np.sum(x)})
opt_S = sco.minimize(
loss,
n_assets * [1.0 / n_assets],
method = 'SLSQP', bounds = bnds,
constraints = cons)
optimal_weights = opt_S['x']
# sometimes optimization fails with constraints; needs to be fixed by hand
if self.allow_short:
optimal_weights /= sum(np.abs(optimal_weights))
else:
optimal_weights += np.abs(np.min(optimal_weights))
optimal_weights /= sum(optimal_weights)
return optimal_weights
class MaxSharpeAgent:
def __init__(
self,
portfolio_size,
allow_short = False,
):
self.portfolio_size = portfolio_size
self.allow_short = allow_short
self.input_shape = (portfolio_size, portfolio_size, )
def act(self, returns):
def loss(weights):
return -portfolio(returns, weights)[2]
n_assets = len(returns.columns)
if self.allow_short:
bnds = tuple((-1.0, 1.0) for x in range(n_assets))
cons =({'type': 'eq', 'fun': lambda x : 1.0 - np.sum(np.abs(x))})
else:
bnds = tuple((0.0, 1.0) for x in range(n_assets))
cons =({'type': 'eq', 'fun': lambda x : 1.0 - np.sum(x)})
opt_S = sco.minimize(
    loss,
    n_assets * [1.0 / n_assets],
    method = 'SLSQP', bounds = bnds,
    constraints = cons)
optimal_weights = opt_S['x']
# sometimes optimization fails with constraints; needs to be fixed by hand
if self.allow_short:
    optimal_weights /= sum(np.abs(optimal_weights))
else:
    optimal_weights += np.abs(np.min(optimal_weights))
    optimal_weights /= sum(optimal_weights)
return optimal_weights
from enum import Enum
from typing import Any, Dict, List, Optional
import attr
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from einops import rearrange
from hydra.utils import instantiate
from IPython.display import clear_output
from tqdm import tqdm
# internal
from artefact_nca.base.base_torch_trainer import BaseTorchTrainer
from artefact_nca.dataset.voxel_dataset import VoxelDataset
from artefact_nca.model.voxel_ca_model import VoxelCAModel
from artefact_nca.utils.minecraft import * # noqa
from artefact_nca.utils.minecraft.voxel_utils import replace_colors
# zero out a cube
def damage_cube(state, x, y, z, half_width):
damaged = state.clone()
x_dim = state.shape[1]
y_dim = state.shape[2]
z_dim = state.shape[3]
from_x = np.clip(x - half_width, a_min=0, a_max=x_dim)
from_y = np.clip(y - half_width, a_min=0, a_max=y_dim)
from_z = np.clip(z - half_width, a_min=0, a_max=z_dim)
to_x = np.clip(x + half_width, a_min=0, a_max=x_dim)
to_y = np.clip(y + half_width, a_min=0, a_max=y_dim)
to_z = np.clip(z + half_width, a_min=0, a_max=z_dim)
damaged[:, from_x:to_x, from_y:to_y, from_z:to_z] = 0.0
return damaged
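# Illustrative usage on a synthetic (channels, x, y, z) state tensor; the
# channel count and grid size are placeholders.
def example_damage_cube():
    state = torch.rand(16, 32, 32, 32)
    return damage_cube(state, x=16, y=16, z=16, half_width=4)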
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.characterisation.correction` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from itertools import permutations
from numpy.linalg import LinAlgError
from colour.characterisation.correction import (
augmented_matrix_Cheung2004, polynomial_expansion_Finlayson2015,
polynomial_expansion_Vandermonde, colour_correction_matrix_Cheung2004,
colour_correction_matrix_Finlayson2015,
colour_correction_matrix_Vandermonde, colour_correction_Cheung2004,
colour_correction_Finlayson2015, colour_correction_Vandermonde)
from colour.utilities import ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'MATRIX_TEST', 'MATRIX_REFERENCE', 'TestAugmentedMatrixCheung2004',
'TestPolynomialExpansionFinlayson2015',
'TestPolynomialExpansionVandermonde',
'TestColourCorrectionMatrixCheung2004',
'TestColourCorrectionMatrixFinlayson2015',
'TestColourCorrectionMatrixVandermonde', 'TestColourCorrectionCheung2004',
'TestColourCorrectionFinlayson2015', 'TestColourCorrectionVandermonde'
]
MATRIX_TEST = np.array([
[0.17224810, 0.09170660, 0.06416938],
[0.49189645, 0.27802050, 0.21923399],
[0.10999751, 0.18658946, 0.29938611],
[0.11666120, 0.14327905, 0.05713804],
[0.18988879, 0.18227649, 0.36056247],
[0.12501329, 0.42223442, 0.37027445],
[0.64785606, 0.22396782, 0.03365194],
[0.06761093, 0.11076896, 0.39779139],
[0.49101797, 0.09448929, 0.11623839],
[0.11622386, 0.04425753, 0.14469986],
[0.36867946, 0.44545230, 0.06028681],
[0.61632937, 0.32323906, 0.02437089],
[0.03016472, 0.06153243, 0.29014596],
[0.11103655, 0.30553067, 0.08149137],
[0.41162190, 0.05816656, 0.04845934],
[0.73339206, 0.53075188, 0.02475212],
[0.47347718, 0.08834792, 0.30310315],
[0.00000000, 0.25187016, 0.35062450],
[0.76809639, 0.78486240, 0.77808297],
[0.53822392, 0.54307997, 0.54710883],
[0.35458526, 0.35318419, 0.35524431],
[0.17976704, 0.18000531, 0.17991488],
[0.09351417, 0.09510603, 0.09675027],
[0.03405071, 0.03295077, 0.03702047],
])
MATRIX_REFERENCE = np.array([
[0.15579559, 0.09715755, 0.07514556],
[0.39113140, 0.25943419, 0.21266708],
[0.12824821, 0.18463570, 0.31508023],
[0.12028974, 0.13455659, 0.07408400],
[0.19368988, 0.21158946, 0.37955964],
[0.19957424, 0.36085439, 0.40678123],
[0.48896605, 0.20691688, 0.05816533],
[0.09775522, 0.16710693, 0.47147724],
[0.39358649, 0.12233400, 0.10526425],
[0.10780332, 0.07258529, 0.16151473],
[0.27502671, 0.34705454, 0.09728099],
[0.43980441, 0.26880559, 0.05430533],
[0.05887212, 0.11126272, 0.38552469],
[0.12705825, 0.25787860, 0.13566464],
[0.35612929, 0.07933258, 0.05118732],
[0.48131976, 0.42082843, 0.07120612],
[0.34665585, 0.15170714, 0.24969804],
[0.08261116, 0.24588716, 0.48707733],
[0.66054904, 0.65941137, 0.66376412],
[0.48051509, 0.47870296, 0.48230082],
[0.33045354, 0.32904184, 0.33228886],
[0.18001305, 0.17978567, 0.18004416],
[0.10283975, 0.10424680, 0.10384975],
[0.04742204, 0.04772203, 0.04914226],
])
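# For orientation only: the 3-term Cheung (2004) fit exercised below amounts,
# on a hedged reading, to an ordinary least-squares mapping from MATRIX_TEST
# to MATRIX_REFERENCE. This numpy-only sketch is not the library
# implementation and is never called by the tests.
def _least_squares_ccm_sketch(M_T, M_R):
    # Solve M_T @ W ~= M_R in the least-squares sense; the 3x3 colour
    # correction matrix applied per RGB triplet is then W transposed.
    W = np.linalg.lstsq(M_T, M_R, rcond=None)[0]
    return np.transpose(W)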
class TestAugmentedMatrixCheung2004(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
augmented_matrix_Cheung2004` definition unit tests methods.
"""
def test_augmented_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
augmented_matrix_Cheung2004` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
polynomials = [
np.array([0.17224810, 0.09170660, 0.06416938]),
np.array(
[0.17224810, 0.09170660, 0.06416938, 0.00101364, 1.00000000]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.00101364, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00511050, 0.00077126, 0.00026423, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00511050, 0.00077126,
0.00026423
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00511050, 0.00077126,
0.00026423, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00190387, 0.00144862,
0.00037762, 0.00511050, 0.00077126, 0.00026423
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00190387, 0.00144862,
0.00037762, 0.00511050, 0.00077126, 0.00026423, 1.00000000
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.01579629, 0.01105305,
0.00588476, 0.02966941, 0.00841010, 0.00411771, 0.00101364,
0.00272088, 0.00053967, 0.00070927, 0.00190387, 0.00144862,
0.00037762, 0.00511050, 0.00077126, 0.00026423, 0.00017460,
0.00009296, 0.00006504
]),
]
for i, terms in enumerate([3, 5, 7, 8, 10, 11, 14, 16, 17, 19, 20,
22]):
np.testing.assert_almost_equal(
augmented_matrix_Cheung2004(RGB, terms),
polynomials[i],
decimal=7)
def test_raise_exception_augmented_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
augmented_matrix_Cheung2004` definition raised exception.
"""
self.assertRaises(ValueError, augmented_matrix_Cheung2004,
np.array([0.17224810, 0.09170660, 0.06416938]), 4)
@ignore_numpy_errors
def test_nan_augmented_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
augmented_matrix_Cheung2004` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
augmented_matrix_Cheung2004(case)
class TestPolynomialExpansionFinlayson2015(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
polynomial_expansion_Finlayson2015` definition unit tests methods.
"""
def test_polynomial_expansion_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Finlayson2015` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
polynomials = [
[
np.array([0.17224810, 0.09170660, 0.06416938]),
np.array([0.17224810, 0.09170660, 0.06416938])
],
[
np.array([
0.17224810, 0.09170660, 0.06416938, 0.02966941, 0.00841010,
0.00411771, 0.01579629, 0.00588476, 0.01105305
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.12568328, 0.07671216,
0.10513350
])
],
[
np.array([
0.17224810, 0.09170660, 0.06416938, 0.02966941, 0.00841010,
0.00411771, 0.01579629, 0.00588476, 0.01105305, 0.00511050,
0.00077126, 0.00026423, 0.00144862, 0.00037762, 0.00070927,
0.00272088, 0.00053967, 0.00190387, 0.00101364
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.12568328, 0.07671216,
0.10513350, 0.11314930, 0.07228010, 0.08918053, 0.13960570,
0.08141598, 0.12394021, 0.10045255
])
],
[
np.array([
0.17224810, 0.09170660, 0.06416938, 0.02966941, 0.00841010,
0.00411771, 0.01579629, 0.00588476, 0.01105305, 0.00511050,
0.00077126, 0.00026423, 0.00144862, 0.00037762, 0.00070927,
0.00272088, 0.00053967, 0.00190387, 0.00101364, 0.00088027,
0.00007073, 0.00001696, 0.00046867, 0.00032794, 0.00013285,
0.00004949, 0.00004551, 0.00002423, 0.00024952, 0.00003463,
0.00012217, 0.00017460, 0.00009296, 0.00006504
]),
np.array([
0.17224810, 0.09170660, 0.06416938, 0.12568328, 0.07671216,
0.10513350, 0.11314930, 0.07228010, 0.08918053, 0.13960570,
0.08141598, 0.12394021, 0.10045255, 0.14713499, 0.13456986,
0.10735915, 0.08387498, 0.08213618, 0.07016104, 0.11495009,
0.09819082, 0.08980545
])
],
]
for i in range(4):
np.testing.assert_almost_equal(
polynomial_expansion_Finlayson2015(RGB, i + 1, False),
polynomials[i][0],
decimal=7)
np.testing.assert_almost_equal(
polynomial_expansion_Finlayson2015(RGB, i + 1, True),
polynomials[i][1],
decimal=7)
def test_raise_exception_polynomial_expansion_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Finlayson2015` definition raised exception.
"""
self.assertRaises(ValueError, polynomial_expansion_Finlayson2015,
np.array([0.17224810, 0.09170660, 0.06416938]), 5)
@ignore_numpy_errors
def test_nan_polynomial_expansion_Finlayson2015(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Finlayson2015` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
polynomial_expansion_Finlayson2015(case)
class TestPolynomialExpansionVandermonde(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
polynomial_expansion_Vandermonde` definition unit tests methods.
"""
def test_polynomial_expansion_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Vandermonde` definition.
"""
RGB = np.array([0.17224810, 0.09170660, 0.06416938])
polynomials = [
np.array([0.17224810, 0.09170660, 0.06416938, 1.00000000]),
np.array([
0.02966941, 0.00841010, 0.00411771, 0.17224810, 0.09170660,
0.06416938, 1.00000000
]),
np.array([
0.00511050, 0.00077126, 0.00026423, 0.02966941, 0.00841010,
0.00411771, 0.17224810, 0.09170660, 0.06416938, 1.00000000
]),
np.array([
0.00088027, 0.00007073, 0.00001696, 0.00511050, 0.00077126,
0.00026423, 0.02966941, 0.00841010, 0.00411771, 0.17224810,
0.09170660, 0.06416938, 1.00000000
]),
]
for i in range(4):
np.testing.assert_almost_equal(
polynomial_expansion_Vandermonde(RGB, i + 1),
polynomials[i],
decimal=7)
@ignore_numpy_errors
def test_nan_polynomial_expansion_Vandermonde(self):
"""
Tests :func:`colour.characterisation.correction.\
polynomial_expansion_Vandermonde` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
polynomial_expansion_Vandermonde(case)
class TestColourCorrectionMatrixCheung2004(unittest.TestCase):
"""
Defines :func:`colour.characterisation.correction.\
colour_correction_matrix_Cheung2004` definition unit tests methods.
"""
def test_colour_correction_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.\
colour_correction_matrix_Cheung2004` definition.
"""
np.testing.assert_almost_equal(
colour_correction_matrix_Cheung2004(MATRIX_TEST, MATRIX_REFERENCE),
np.array([
[0.69822661, 0.03071629, 0.16210422],
[0.06893498, 0.67579611, 0.16430385],
[-0.06314956, 0.09212471, 0.97134152],
]),
decimal=7)
np.testing.assert_almost_equal(
colour_correction_matrix_Cheung2004(
MATRIX_TEST, MATRIX_REFERENCE, terms=7),
np.array([
[
0.80512769, 0.04001012, -0.01255261, -0.41056170,
-0.28052094, 0.68417697, 0.02251728
],
[
0.03270288, 0.71452384, 0.17581905, -0.00897913,
0.04900199, -0.17162742, 0.01688472
],
[
-0.03973098, -0.07164767, 1.16401636, 0.29017859,
-0.88909018, 0.26675507, 0.02345109
],
]),
decimal=7)
@ignore_numpy_errors
def test_nan_colour_correction_matrix_Cheung2004(self):
"""
Tests :func:`colour.characterisation.correction.
colour_correction_matrix_Cheung2004` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = list(set(permutations(cases * 3, r=3)))[0:4]
for case in cases:
try:
colour_correction_matrix_Cheung2004(
np.vstack([case, case, case]),
np.vstack([case, case, case]))
except LinAlgError:
    pass
from datetime import datetime, timedelta
from io import StringIO
import re
import sys
import numpy as np
import pytest
from pandas._libs.tslib import iNaT
from pandas.compat import PYPY
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_object_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
PeriodIndex,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
)
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import DatetimeArray, PandasArray, TimedeltaArray
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
import pandas.util.testing as tm
class CheckStringMixin:
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
def test_tricky_container(self):
if not hasattr(self, "unicode_container"):
pytest.skip("Need unicode_container to test with this")
repr(self.unicode_container)
str(self.unicode_container)
class CheckImmutable:
mutable_regex = re.compile("does not support mutable operations")
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to pytest.raises
# (after the Exception kind).
with pytest.raises(TypeError):
self.mutable_regex(*args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate:
class Delegator:
_properties = ["foo"]
_methods = ["bar"]
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ="property",
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
)
delegate = self.Delegate(self.Delegator())
with pytest.raises(TypeError):
delegate.foo
with pytest.raises(TypeError):
delegate.foo = 5
with pytest.raises(TypeError):
delegate.foo()
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops:
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and obj.is_boolean()) or not obj._can_hold_na:
# don't test boolean / integer dtypes
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name="a")
self.int_index = tm.makeIntIndex(10, name="a")
self.float_index = tm.makeFloatIndex(10, name="a")
self.dt_index = tm.makeDateIndex(10, name="a")
self.dt_tz_index = tm.makeDateIndex(10, name="a").tz_localize(tz="US/Eastern")
self.period_index = tm.makePeriodIndex(10, name="a")
self.string_index = tm.makeStringIndex(10, name="a")
self.unicode_index = tm.makeUnicodeIndex(10, name="a")
arr = np.random.randn(10)
self.bool_series = Series(arr, index=self.bool_index, name="a")
self.int_series = Series(arr, index=self.int_index, name="a")
self.float_series = Series(arr, index=self.float_index, name="a")
self.dt_series = Series(arr, index=self.dt_index, name="a")
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name="a")
self.string_series = Series(arr, index=self.string_index, name="a")
self.unicode_series = Series(arr, index=self.unicode_index, name="a")
types = ["bool", "int", "float", "dt", "dt_tz", "period", "string", "unicode"]
self.indexes = [getattr(self, "{}_index".format(t)) for t in types]
self.series = [getattr(self, "{}_series".format(t)) for t in types]
# To test narrow dtypes, we use narrower *data* elements, not *index* elements
index = self.int_index
self.float32_series = Series(arr.astype(np.float32), index=index, name="a")
arr_int = np.random.choice(10, size=10, replace=False)
self.int8_series = Series(arr_int.astype(np.int8), index=index, name="a")
self.int16_series = Series(arr_int.astype(np.int16), index=index, name="a")
self.int32_series = Series(arr_int.astype(np.int32), index=index, name="a")
self.uint8_series = Series(arr_int.astype(np.uint8), index=index, name="a")
self.uint16_series = Series(arr_int.astype(np.uint16), index=index, name="a")
self.uint32_series = Series(arr_int.astype(np.uint32), index=index, name="a")
nrw_types = ["float32", "int8", "int16", "int32", "uint8", "uint16", "uint32"]
self.narrow_series = [getattr(self, "{}_series".format(t)) for t in nrw_types]
self.objs = self.indexes + self.series + self.narrow_series
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(getattr(o.index, op), index=o.index, name="a")
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
# these could be series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(
expected, np.ndarray
):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
err = AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
err = TypeError
with pytest.raises(err):
getattr(o, op)
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_binary_ops_docs(self, klass):
op_map = {
"add": "+",
"sub": "-",
"mul": "*",
"mod": "%",
"pow": "**",
"truediv": "/",
"floordiv": "//",
}
for op_name in op_map:
operand1 = klass.__name__.lower()
operand2 = "other"
op = op_map[op_name]
expected_str = " ".join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = " ".join([operand2, op, operand1])
assert expected_str in getattr(klass, "r" + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super().setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
result = None != o # noqa
assert result.iat[0]
assert result.iat[1]
if is_datetime64_dtype(o) or is_datetime64tz_dtype(o):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
# Check that we work.
for p in ["shape", "dtype", "T", "nbytes"]:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ["flags", "strides", "itemsize"]:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, "base")
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
with tm.assert_produces_warning(FutureWarning):
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
# take-based repeat
indices = np.repeat(np.arange(len(o)), range(1, len(o) + 1))
rep = values.take(indices)
o = klass(rep, index=idx, name="a")
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(
range(10, 0, -1), index=expected_index, dtype="int64", name="a"
)
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == "a"
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
assert result.dtype == orig.dtype
elif is_datetime64tz_dtype(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(
result.astype(object), orig._values.astype(object)
)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert result.dtype == orig.dtype
assert o.nunique() == len(np.unique(o.values))
@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_value_counts_unique_nunique_null(self, null_obj):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetime64tz_dtype(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = pd.NaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = "a"
else:
if isinstance(o, DatetimeIndex):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = "a"
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name="a")
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(
list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype="int64",
name="a",
)
expected_s = Series(
list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype="int64",
name="a",
)
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == "a"
result_s = o.value_counts()
            tm.assert_series_equal(result_s, expected_s)
assert result_s.index.name is None
assert result_s.name == "a"
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result, Index(values[1:], name="a"))
elif is_datetime64tz_dtype(o):
# unable to compare NaT / nan
tm.assert_extension_array_equal(result[1:], values[2:])
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_inferred(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list("acbd")).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list("cdab"))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"])
tm.assert_series_equal(hist, expected)
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_bins(self, klass):
s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
s = klass(s_values)
# bins
with pytest.raises(TypeError):
s.value_counts(bins=1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
# these return the same
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
s = klass(s_values)
expected = Series([4, 3, 2], index=["b", "a", "d"])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(["a", "b", np.nan, "d"])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(["a", "b", np.nan, "d"], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize("klass", [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(
[
"xxyyzz20100101PIE",
"xxyyzz20100101GUM",
"xxyyzz20100101EGG",
"xxyyww20090101EGG",
"foofoo20080909PIE",
"foofoo20080909GUM",
]
)
f = StringIO(txt)
df = pd.read_fwf(
f, widths=[6, 8, 3], names=["person_id", "dt", "food"], parse_dates=["dt"]
)
s = klass(df["dt"].copy())
s.name = None
idx = pd.to_datetime(
["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
)
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(
["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
dtype="datetime64[ns]",
)
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df["dt"].copy()
s = klass(list(s.values) + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == "datetime64[ns]"
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == "datetime64[ns]"
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name="dt")
result = td.value_counts()
expected_s = Series([6], index=[Timedelta("1day")], name="dt")
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(["1 days"], name="dt")
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name="dt")
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
                exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
                exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
codes, uniques = o.factorize()
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig), check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques, check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array(
[5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.intp
)
codes, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(
uniques, Index(orig).sort_values(), check_names=False
)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4], np.intp)
codes, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(codes, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name="a")
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True], dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep="last")
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep="last")
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
                with pytest.raises(
                    TypeError,
                    match=r"drop_duplicates\(\) got an unexpected keyword argument",
                ):
                    idx.drop_duplicates(inplace=True)
else:
expected = Series(
[False] * len(original), index=original.index, name="a"
)
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name="a")
expected = Series(
[False] * len(original) + [True, True], index=idx, name="a"
)
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep="last"), expected)
tm.assert_series_equal(
s.drop_duplicates(keep="last"), s[~np.array(base)]
)
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name="a")
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(
s.drop_duplicates(keep=False), s[~np.array(base)]
)
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame(
{
"a": [1, 1, 1, "one", "one"],
"b": [2, 2, np.nan, np.nan, np.nan],
"c": [3, 3, np.nan, np.nan, "three"],
"d": [1, 2, 3, 4, 4],
"e": [
datetime(2015, 1, 1),
datetime(2015, 1, 1),
datetime(2015, 2, 1),
pd.NaT,
pd.NaT,
],
}
)
for column in df.columns:
for keep in ["first", "last", False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# # GH 11343
# though Index.fillna and Series.fillna has separate impl,
# test here to confirm these works as the same
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected, dtype=orig.dtype)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if is_object_dtype(o) or (
isinstance(o, Series) and is_object_dtype(o.index)
):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert (
o.memory_usage(index=False) + o.index.memory_usage()
) == o.memory_usage(index=True)
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
def test_getitem(self):
for i in self.indexes:
s = pd.Series(i)
assert i[0] == s.iloc[0]
assert i[5] == s.iloc[5]
assert i[-1] == s.iloc[-1]
assert i[-1] == i[9]
with pytest.raises(IndexError):
i[20]
with pytest.raises(IndexError):
s.iloc[20]
@pytest.mark.parametrize("indexer_klass", [list, pd.Index])
@pytest.mark.parametrize(
"indexer",
[
[True] * 10,
[False] * 10,
[True, False, True, True, False, False, True, True, False, True],
],
)
def test_bool_indexing(self, indexer_klass, indexer):
# GH 22533
for idx in self.indexes:
exp_idx = [i for i in range(len(indexer)) if indexer[i]]
tm.assert_index_equal(idx[indexer_klass(indexer)], idx[exp_idx])
s = pd.Series(idx)
tm.assert_series_equal(s[indexer_klass(indexer)], s.iloc[exp_idx])
def test_get_indexer_non_unique_dtype_mismatch(self):
# GH 25459
indexes, missing = pd.Index(["A", "B"]).get_indexer_non_unique(pd.Index([0]))
tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
tm.assert_numpy_array_equal(np.array([0], dtype=np.int64), missing)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
tm.assert_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(1)
with pytest.raises(ValueError, match=self.errmsg):
obj.transpose(axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
            tm.assert_equal(np.transpose(obj), obj)
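            # NOTE: assumed completion -- the source was truncated right after
            # np.transpose(obj). This axes check mirrors
            # test_transpose_non_default_axes above and reuses self.errmsg.
            with pytest.raises(ValueError, match=self.errmsg):
                np.transpose(obj, axes=1)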
from math import ceil, floor
import numpy as np
from BoundingBox import BoundingBox
def fibonacci_sphere_section(num_points_whole_sphere: int, bbox: BoundingBox):
lat_min, lat_max, lon_min, lon_max = bbox.lat_min, bbox.lat_max, bbox.lon_min, bbox.lon_max
#print("lat_min: {}, lat_max: {}, lon_min: {}, lon_max: {}".format(lat_min, lat_max, lon_min, lon_max))
    ga = (3 - np.sqrt(5)) * np.pi  # golden angle in radians
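    # NOTE: minimal sketch of the remainder, assuming the standard
    # Fibonacci-lattice construction (the original body was truncated here).
    # Latitudes come from z sampled uniformly in (-1, 1), longitudes from
    # successive golden-angle steps; only points inside bbox are returned.
    points = []
    for i in range(num_points_whole_sphere):
        z = 1 - (2 * i + 1) / num_points_whole_sphere  # uniform in (-1, 1)
        lat = np.degrees(np.arcsin(z))
        lon = np.degrees((ga * i) % (2 * np.pi))
        if lon > 180.0:
            lon -= 360.0  # wrap to (-180, 180]
        if lat_min <= lat <= lat_max and lon_min <= lon <= lon_max:
            points.append((lat, lon))
    return points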
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
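        # Precompute, for each operation, the transposed rotation (used to
        # map Miller indices) and the componentwise factors exp(-2j*pi*t),
        # whose product over components gives exp(-2j*pi * dot(h, t)).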
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
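# Example usage (sketch, not part of the generated module): groups can be
# looked up by number or Hermann-Mauguin symbol, and a reflection expanded
# into its symmetry mates.
#
#     sg = space_groups['P 21 21 21']      # same object as space_groups[19]
#     hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
#     # hkls holds one Miller index per symmetry operation; each phase is
#     # exp(-2j*pi * dot(hkl, t)) for that operation's translation t.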
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
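# Face-centred (F) groups such as F m m 2 list their four base operations
# four times: once unshifted and once for each centring translation
# (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), giving 16 operations in all.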
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
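# Body-centred (I) groups duplicate their base operations once more with
# the centring translation (1/2,1/2,1/2), so I m m 2 carries 8 operations.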
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
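# A minimal sketch (not part of the generated table) of how one of these
# (rot, trans_num, trans_den) tuples can be applied to a point given in
# fractional coordinates; the name _apply_space_group_op is introduced
# here purely for illustration.
def _apply_space_group_op(op, point):
    rot, trans_num, trans_den = op
    # Apply rot to the fractional coordinate, add the translation
    # trans_num/trans_den, and wrap each component back into [0, 1).
    return [(rot[i][0]*point[0] + rot[i][1]*point[1] + rot[i][2]*point[2]
             + float(trans_num[i])/trans_den[i]) % 1.0
            for i in range(3)]
# Example, using the list built just above: transformations[4] is the pure
# inversion of P m m m, so
#     _apply_space_group_op(transformations[4], [0.1, 0.2, 0.3])
# yields [0.9, 0.8, 0.7].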
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
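# The ':2' suffix in the lookup key marks origin choice 2 of the
# International Tables (origin at a centre of inversion); consistently,
# the pure inversion in the list above carries a zero translation.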
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
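# C-centred groups repeat their eight base operations shifted by the
# centring translation (1/2,1/2,0).  The shifted translations are stored
# unreduced, e.g. (1/2,1/2,-1/2) above rather than the equivalent
# (1/2,1/2,1/2) modulo full lattice translations.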
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
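# Combining the full mmm point group (8 operations) with the three
# F-centring translations gives F m m m its 8 * 4 = 32 operations; the
# next entry (space group 70, with quarter-cell d-glide translations) is
# built the same way.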
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
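# The ':2' suffix in symbols such as 'F d d d :2' denotes origin choice 2
# of the International Tables setting (origin on a centre of inversion),
# which is why the pure inversion in this block carries a zero translation.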
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
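# For this body-centred group the second half of the list repeats the
# first eight operations with the I-centring translation (1/2, 1/2, 1/2)
# added; the same pattern recurs in the other I-centred groups below.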
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
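# Sketch: every group is registered twice, once under its International
# Tables number and once under its Hermann-Mauguin symbol, so both
# lookups below return the same object.
assert space_groups[75] is space_groups['P 4']
assert space_groups[75] is sg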
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
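# P 41 and P 43 form an enantiomorphic pair: the screw translations
# z+1/4 and z+3/4 are interchanged between the two blocks above.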
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
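# The matrices of the form (x, y, z) -> (y, -x, -z) above are the -4
# rotoinversion; P -4 (81) and I -4 (82) are generated from it rather
# than from a proper fourfold rotation.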
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
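# 'I 41/a :2' is again the origin-choice-2 setting: the inversion in the
# first half of the list sits at the origin with zero translation.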
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
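# In 'P 4 21 2' the (1/2, 1/2, 0) translations attached to the twofold
# operations above turn the axes along a and b into 2_1 screw axes.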
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
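# Space group 102 (P 42 n m): 8 symmetry operations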
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
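# Space group 103 (P 4 c c): 8 symmetry operations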
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
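# Space group 104 (P 4 n c): 8 symmetry operations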
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
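# Space group 105 (P 42 m c): 8 symmetry operations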
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
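# Space group 106 (P 42 b c): 8 symmetry operations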
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
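# Space group 107 (I 4 m m): body-centred; 16 operations (8 plus their
# +(1/2,1/2,1/2) images)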
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
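# Space group 108 (I 4 c m): body-centred; 16 operations. Some translations,
# e.g. (1/2,1/2,1), are stored unreduced; they are equivalent modulo the lattice.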
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
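# Space group 109 (I 41 m d): body-centred; 16 operations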
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
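# Space group 110 (I 41 c d): body-centred; 16 operations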
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
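# Space group 111 (P -4 2 m): 8 symmetry operations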
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
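# Space group 112 (P -4 2 c): 8 symmetry operations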
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
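# Space group 113 (P -4 21 m): 8 symmetry operations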
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
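# Space group 114 (P -4 21 c): 8 symmetry operations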
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
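# Space group 115 (P -4 m 2): 8 symmetry operations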
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
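# Space group 116 (P -4 c 2): 8 symmetry operations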
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
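# Space group 117 (P -4 b 2): 8 symmetry operations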
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
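# Space group 118 (P -4 n 2): 8 symmetry operations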
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
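# Space group 119 (I -4 m 2): body-centred; 16 operations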
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
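# Space group 120 (I -4 c 2): body-centred; 16 operations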
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
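# Space group 121 (I -4 2 m): body-centred; 16 operations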
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
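# Space group 122 (I -4 2 d): body-centred; 16 operations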
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
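# Space group 123 (P 4/m m m): centrosymmetric; 16 symmetry operations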
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
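# Space group 124 (P 4/m c c): centrosymmetric; 16 symmetry operations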
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
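# Space group 125 (P 4/n b m :2): origin choice 2; 16 symmetry operations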
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
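# Space group 126 (P 4/n n c :2): origin choice 2; 16 symmetry operations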
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
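# Space group 127 (P 4/m b m): centrosymmetric; 16 symmetry operations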
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
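# Space group 128 (P 4/m n c): centrosymmetric; 16 symmetry operations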
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
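# No. 129: P 4/n m m, origin choice 2 (tetragonal, 16 operators).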
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
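# No. 130: P 4/n c c, origin choice 2 (tetragonal, 16 operators).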
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
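# No. 131: P 42/m m c (tetragonal, 16 operators).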
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
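# No. 132: P 42/m c m (tetragonal, 16 operators).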
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
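# No. 133: P 42/n b c, origin choice 2 (tetragonal, 16 operators).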
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
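# No. 134: P 42/n n m, origin choice 2 (tetragonal, 16 operators).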
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
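# No. 135: P 42/m b c (tetragonal, 16 operators).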
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
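# No. 136: P 42/m n m (tetragonal, 16 operators).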
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
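# No. 137: P 42/n m c, origin choice 2 (tetragonal, 16 operators).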
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
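# No. 138: P 42/n c m, origin choice 2 (tetragonal, 16 operators).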
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
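# No. 139: I 4/m m m (body-centred tetragonal, 32 operators).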
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
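# Body-centring coset: the 16 operators above repeat with +(1/2,1/2,1/2).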
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
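# No. 140: I 4/m c m (body-centred tetragonal, 32 operators).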
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
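# Body-centring coset: the 16 operators above repeat with +(1/2,1/2,1/2).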
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
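# No. 141: I 41/a m d, origin choice 2 (body-centred tetragonal, 32 operators).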
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
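# Body-centring coset: the 16 operators above repeat with +(1/2,1/2,1/2).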
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
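# No. 142: I 41/a c d, origin choice 2 (body-centred tetragonal, 32 operators).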
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
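# Body-centring coset: the 16 operators above repeat with +(1/2,1/2,1/2).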
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
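# No. 143: P 3 (trigonal, 3 operators).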
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
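# No. 144: P 31 (trigonal; 3-fold screw, translations 0, c/3, 2c/3).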
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
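# No. 145: P 32 (trigonal; 3-fold screw, translations 0, 2c/3, c/3).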
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
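# No. 146: R 3, hexagonal (obverse) setting -- 3 operators x 3 centring
# vectors = 9 entries.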
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
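# Rhombohedral centring (obverse setting): copies of the operators above at
# +(1/3,2/3,2/3) and +(2/3,1/3,1/3) follow.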
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
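# No. 147: P -3 (trigonal, 6 operators).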
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
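# No. 148: R -3, hexagonal (obverse) setting -- 6 operators x 3 centring
# vectors = 18 entries.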
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
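# Rhombohedral centring (obverse setting): copies of the operators above at
# +(1/3,2/3,2/3) and +(2/3,1/3,1/3) follow.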
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
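# Note: 'R -3 :H' is the hexagonal setting of a rhombohedrally centred
# group, so each of the six -3 point operations recurs with the centring
# vectors (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3), giving 18 entries.
# Optional check (safe to remove), assuming `transformations` still holds
# the list built just above:
assert len(transformations) == 18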
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
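# The c-translations of 1/3 and 2/3 above come from the 3_1 screw axis;
# applying the screw three times must yield the pure lattice translation
# (0,0,1). Illustrative check (safe to remove), assuming N is the NumPy
# alias used in this module. The P 32 groups that follow are the
# enantiomorphs, with the 1/3 and 2/3 translations interchanged.
_r = N.array([0,-1,0,1,-1,0,0,0,1]); _r.shape = (3, 3)
_t = N.array([0.0, 0.0, 1.0/3.0])
_acc_r, _acc_t = N.identity(3), N.zeros(3)
for _ in range(3):
    _acc_t = N.dot(_r, _acc_t) + _t
    _acc_r = N.dot(_r, _acc_r)
assert (_acc_r == N.identity(3)).all()
assert (abs(_acc_t - N.array([0.0, 0.0, 1.0])) < 1e-12).all()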
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
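# Note: translations in this table are stored as unreduced exact fractions
# (numerator array over denominator array). The 7/6 entries in 'R 3 c :H'
# above are equivalent to 1/6 modulo a lattice translation, and negative
# numerators such as -1/2 in later groups are equivalent to +1/2;
# consumers are expected to wrap coordinates back into the unit cell.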
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
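# Space groups 143-167 above form the trigonal system; the hexagonal
# system proper (168-194) starts here.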
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
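# P 61's six operations are exactly the powers of a single 6_1 screw
# generator (60-degree rotation about c plus a translation of c/6); P 65,
# defined next, is the enantiomorph with generator translation 5c/6.
# Illustrative regeneration (safe to remove; ordering may differ from the
# listing above), assuming N is the NumPy alias used in this module:
_g = N.array([1,-1,0,1,0,0,0,0,1]); _g.shape = (3, 3)
_r, _t = N.identity(3), N.zeros(3)
_regenerated = []
for _k in range(6):
    _regenerated.append((_r, _t % 1.0))
    _t = N.dot(_g, _t) + N.array([0.0, 0.0, 1.0/6.0])
    _r = N.dot(_g, _r)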
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
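# Note: the rotoinversion -6 is equivalent to a threefold rotation
# combined with a mirror perpendicular to c, so 'P -6' pairs the three
# threefold operations with their mirror-related counterparts; the last
# matrix above (x,y,z -> x,y,-z) is the mirror itself.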
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
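# P 6 2 2 is a pure rotation group of order 12: every point operation is
# proper. Illustrative check (safe to remove), assuming `transformations`
# still holds the list built just above; `_det3` is a hypothetical helper.
def _det3(m):
    # cofactor expansion along the first row of a 3x3 matrix
    return (m[0,0]*(m[1,1]*m[2,2] - m[1,2]*m[2,1])
            - m[0,1]*(m[1,0]*m[2,2] - m[1,2]*m[2,0])
            + m[0,2]*(m[1,0]*m[2,1] - m[1,1]*m[2,0]))
assert all(_det3(t[0]) == 1 for t in transformations)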
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
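# Space group 179: P 65 2 2 (hexagonal, 12 operations)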
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
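# Space group 180: P 62 2 2 (hexagonal, 12 operations)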
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
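# Space group 181: P 64 2 2 (hexagonal, 12 operations)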
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
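# Space group 182: P 63 2 2 (hexagonal, 12 operations)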
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
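# Space group 183: P 6 m m (hexagonal, 12 operations)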
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
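# Space group 184: P 6 c c (hexagonal, 12 operations)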
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
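# Space group 185: P 63 c m (hexagonal, 12 operations)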
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
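# Space group 186: P 63 m c (hexagonal, 12 operations)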
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
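# Space group 187: P -6 m 2 (hexagonal, 12 operations)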
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
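# Space group 188: P -6 c 2 (hexagonal, 12 operations)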
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
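# Space group 189: P -6 2 m (hexagonal, 12 operations)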
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
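# Space group 190: P -6 2 c (hexagonal, 12 operations)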
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
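# Space group 191: P 6/m m m (hexagonal, 24 operations)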
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
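# Space group 192: P 6/m c c (hexagonal, 24 operations)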
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
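# Space group 193: P 63/m c m (hexagonal, 24 operations)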
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
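# Space group 194: P 63/m m c (hexagonal, 24 operations)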
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
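# Space group 195: P 2 3 (cubic, 12 operations)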
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
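# Space group 196: F 2 3 (cubic, 48 operations: the 12 point operations
# repeated with the four F-centering translations (0,0,0), (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0))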
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
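# Space group 197: I 2 3 (cubic, 24 operations: the 12 point operations
# repeated with the I-centering translation (1/2,1/2,1/2))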
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
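# Space group 198 (P 21 3): primitive cubic with 2_1 screw axes -- unlike F 2 3
# and I 2 3 above, most operations here carry half-cell translations.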
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
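# Space group 199 (I 21 3): body-centred counterpart of P 21 3. The 12 operations
# are repeated with the (1/2, 1/2, 1/2) centring shift; numerators are kept
# unreduced, so entries like 1/1 are full lattice translations.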
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
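# Space group 200 (P m -3): centrosymmetric. Operations 13-24 below are
# operations 1-12 composed with the inversion (the rot = -identity entry opens
# the second half).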
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
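# Space group 201 (P n -3, origin choice 2 -- the ':2' suffix): with the origin
# on the inversion centre, several operations pick up half-cell glide
# translations; negative numerators such as (-1/2, -1/2, 0) are equivalent to
# (1/2, 1/2, 0) modulo a lattice translation.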
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
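# Space group 202 (F m -3): face-centred. The 24 operations of m -3 appear four
# times below -- unshifted, then with each F-centring vector (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0).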
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
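# Next in sequence, space group 203 (F d -3): same face-centred layout as F m -3
# above, but the diamond-glide operations carry quarter-cell translations
# (denominators of 4 below).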
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
import numpy as np
import matplotlib.pyplot as plt
import pathlib
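# Monte-Carlo study of the Monty Hall problem generalised from 3 up to n_doors
# doors: for each door count, n_trials games are simulated and the win rates
# with and without switching are accumulated in `transition`.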
def counter(counts, car, chosen):
    """Return the running win count, incremented when the chosen door hides the car."""
    return counts[-1] + 1 if car == chosen else counts[-1]
def main():
transition = {'not_switched':[],'switched':[]}
n_doors = 100
n_trials = 1000
for door in range(3, n_doors):
not_switched = {'count':[0], 'rate':[.0]}
switched = {'count':[0], 'rate':[.0]}
for i in range(0, n_trials):
doors = np.array([j for j in range(door)])
            car = np.random.randint(0, door)
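            # The remainder of one trial would typically look like this
            # (illustrative sketch, not the original continuation of this file):
            #   chosen = np.random.randint(0, door)            # contestant's pick
            #   opened = np.random.choice(
            #       [d for d in doors if d != car and d != chosen])  # host reveals a goat
            #   switch_to = np.random.choice(
            #       [d for d in doors if d not in (chosen, opened)])
            #   not_switched['count'].append(counter(not_switched['count'], car, chosen))
            #   switched['count'].append(counter(switched['count'], car, switch_to))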
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# test_utils.py
"""
Test utils.
Copyright (c) 2021, <NAME>
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
import unittest
from pydecon.utils import _ensure_positive, radialavg, expand_radialavg
def make_random_blob(ndims, size):
"""Make a random sized and placed blob."""
# make coordinates
x = np.linspace(-1, 1, size)
mesh = np.meshgrid(*((x,) * ndims), indexing="ij")
# randomly generate radii and center
radii = (np.random.rand(ndims) - 0.5) * 0.2
    center = np.random.rand(ndims)
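    # The blob itself would plausibly be an ellipsoid mask built from these
    # pieces, e.g. (sketch, not the original continuation):
    #   blob = sum(((m - c) / r) ** 2 for m, c, r in zip(mesh, center, radii)) < 1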
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
test_structure_output
"""
import numpy as np
import mindspore.ops.operations as P
from mindspore import Tensor, context
from mindspore.nn import Cell
from mindspore.ops.functional import depend
context.set_context(mode=context.GRAPH_MODE)
def test_output_const_tuple_0():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.x = (1, 2, 3)
def construct(self):
return self.x
x = (1, 2, 3)
net = Net()
assert net() == x
def test_output_const_tuple_1():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.tuple_1 = (1, 2, 3)
self.tuple_2 = (4, 5, 6)
def construct(self):
ret = self.tuple_1 + self.tuple_2
return ret
net = Net()
assert net() == (1, 2, 3, 4, 5, 6)
def test_output_const_list():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.tuple_1 = [1, 2, 3]
def construct(self):
ret = self.tuple_1
return ret
net = Net()
assert net() == (1, 2, 3)
def test_output_const_int():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.number_1 = 2
self.number_2 = 3
def construct(self):
ret = self.number_1 + self.number_2
return ret
net = Net()
assert net() == 5
def test_output_const_str():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.str = "hello world"
def construct(self):
ret = self.str
return ret
net = Net()
assert net() == "hello world"
def test_output_parameter_int():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
def construct(self, x):
return x
x = Tensor(np.array(88).astype(np.int32))
net = Net()
assert net(x) == x
def test_output_parameter_str():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.x = "hello world"
def construct(self):
return self.x
x = "hello world"
net = Net()
assert net() == x
def test_tuple_tuple_0():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.add = P.TensorAdd()
self.sub = P.Sub()
def construct(self, x, y):
xx = self.add(x, x)
yy = self.add(y, y)
xxx = self.sub(x, x)
yyy = self.sub(y, y)
ret = ((xx, yy), (xxx, yyy))
ret = (ret, ret)
return ret
net = Net()
x = Tensor(np.ones([2], np.int32))
y = Tensor(np.zeros([3], np.int32))
net(x, y)
def test_tuple_tuple_1():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.add = P.TensorAdd()
self.sub = P.Sub()
def construct(self, x, y):
xx = self.add(x, x)
yy = self.add(y, y)
ret = ((xx, yy), x)
ret = (ret, ret)
return ret
net = Net()
x = Tensor(np.ones([2], np.int32))
y = Tensor(np.zeros([3], np.int32))
net(x, y)
def test_tuple_tuple_2():
class Net(Cell):
def __init__(self):
super(Net, self).__init__()
self.add = P.TensorAdd()
self.sub = P.Sub()
self.relu = P.ReLU()
self.depend = depend
def construct(self, x, y):
xx = self.add(x, x)
yy = self.add(y, y)
xxx = self.sub(x, x)
yyy = self.sub(y, y)
z = self.relu(x)
ret = ((xx, yy), (xxx, yyy))
ret = (ret, ret)
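            # depend() returns `ret` unchanged while adding a control dependency
            # on z, so the otherwise-unused relu(x) is kept in the compiled graph.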
ret = self.depend(ret, z)
return ret
net = Net()
    x = Tensor(np.ones([2], np.int32))
    y = Tensor(np.zeros([3], np.int32))
    net(x, y)
import sys
import matplotlib.pyplot
import pandas
import numpy
import struct
import os
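# Read a binary log of unsigned 64-bit per-event intervals (inferred from the
# 'Q' unpack format and the 1e6 scaling below, the values appear to be in
# microseconds), drop the leading header word and outliers of 100 ms or more,
# then derive a cumulative time axis and per-second event rates.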
data = None
with open(sys.argv[1], "rb") as f:
data = f.read()
data = struct.unpack("{}Q".format(len(data) // 8), data)
data = numpy.array(data, dtype=numpy.uint64)[1:]
data = numpy.array([x for x in data if x < 100000])
rt = numpy.cumsum(data) / 1000000  # cumulative wall-clock time in seconds
lTime = rt[-1]
lTime += 5
lScalar = ((lTime // 60) + 1)  # round the axis limit up to the next whole minute
lTime = lScalar * 60
data = 1000000 / data  # per-event interval (us) -> events per second
highest = numpy.max(data)
import numpy as np
import matplotlib.pyplot as plt
Ts = 2.
Ns = 100
tr = np.linspace(0, Ts, Ns)
Di = 1.3
tri = tr / Di
x = np.sin(2 * np.pi * 10 * tr)
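# A 10 Hz sine over Ts = 2 s sampled with Ns = 100 points, i.e. 50 samples/s or
# 5 samples per cycle -- comfortably above the 20 samples/s Nyquist rate.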
from numpy.lib.polynomial import poly
from numpy.lib.twodim_base import tri
from plateaupy.plobj import plmesh, plobj
from plateaupy.plutils import *
from plateaupy.ploptions import ploptions
from plateaupy.thirdparty.earcutpython.earcut.earcut import earcut
import numpy as np
import copy
import pickle
import sys
import os
import cv2
from lxml import etree
import lxml
_floorheight = 2 # fixed value, the height of 1 floor in meter.
class Building:
def __init__(self):
self.id = None # gml:id
self.attr = dict()
self.usage = None
self.measuredHeight = None
self.storeysAboveGround = None
self.storeysBelowGround = None
self.address = None
self.buildingDetails = dict()
self.extendedAttribute = dict()
self.lod0RoofEdge = []
self.lod1Solid = []
#self.lod2Solid = []
# lod2MultiSurface
self.lod2ground = dict()
self.lod2roof = dict()
self.lod2wall = dict()
self.partex = appParameterizedTexture()
def __str__(self):
return 'Building id={}\n\
usage={}, measuredHeight={}, storeysAboveGround={}, storeysBelowGround={}\n\
address={}\n\
buildingDetails={}\n\
extendedAttribute={}\n\
attr={}'\
.format(self.id, self.usage, self.measuredHeight, self.storeysAboveGround, self.storeysBelowGround, \
self.address, self.buildingDetails, self.extendedAttribute, self.attr)
# get vertices, triangles from lod0RoofEdge
def getLOD0polygons(self, height=None):
vertices = None
triangles = None
if len(self.lod0RoofEdge) > 0:
vertices = []
for x in self.lod0RoofEdge[0]:
xx = copy.deepcopy(x)
if height is not None:
xx[2] = height
vertices.append( convertPolarToCartsian( *xx ) )
vertices = np.array(vertices)
res = earcut(np.array(vertices,dtype=np.int).flatten(), dim=3)
if len(res) > 0:
triangles = np.array(res).reshape((-1,3))
return vertices, triangles
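# Maps one texture image (imageURI) to per-polygon UV coordinate lists, keyed by
# the '#<gml:id>' of each target polygon; search_list() returns the appearance
# that textures a given polygon id, if any.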
class appParameterizedTexture:
def __init__(self):
self.imageURI = None
self.targets = dict()
@classmethod
def search_list(cls, applist, polyid):
for app in applist:
if polyid in app.targets.keys():
return app
return None
class plbldg(plobj):
def __init__(self,filename=None, options=ploptions()):
super().__init__()
self.kindstr = 'bldg'
self.buildings = [] # list of Building
if filename is not None:
self.loadFile(filename, options=options)
def loadFile(self,filename, options=ploptions()):
tree, root = super().loadFile(filename)
nsmap = self.removeNoneKeyFromDic(root.nsmap)
# scan appearanceMember
partex = []
for app in tree.xpath('/core:CityModel/app:appearanceMember/app:Appearance/app:surfaceDataMember/app:ParameterizedTexture', namespaces=nsmap):
par = appParameterizedTexture()
for at in app.xpath('app:imageURI', namespaces=nsmap):
par.imageURI = at.text
for at in app.xpath('app:target', namespaces=nsmap):
uri = at.attrib['uri']
colist = [str2floats(v).reshape((-1,2)) for v in at.xpath('app:TexCoordList/app:textureCoordinates', namespaces=nsmap)]
maxnum = max(map(lambda x:x.shape[0],colist))
for cidx,co in enumerate(colist):
last = co[-1].reshape(-1,2)
num = maxnum - co.shape[0]
if num > 0:
colist[cidx] = np.append(co,np.tile(co[-1].reshape(-1,2),(num,1)),axis=0)
par.targets[uri] = np.array(colist)
partex.append(par)
# scan cityObjectMember
blds = tree.xpath('/core:CityModel/core:cityObjectMember/bldg:Building', namespaces=nsmap)
for bld in blds:
b = Building()
# gml:id
b.id = bld.attrib['{'+nsmap['gml']+'}id']
# stringAttribute
stringAttributes = bld.xpath('gen:stringAttribute', namespaces=nsmap)
for at in stringAttributes:
b.attr[at.attrib['name']] = at.getchildren()[0].text
# genericAttributeSet
genericAttributeSets = bld.xpath('gen:genericAttributeSet', namespaces=nsmap)
for at in genericAttributeSets:
vals = dict()
for ch in at.getchildren():
vals[ ch.attrib['name'] ] = ch.getchildren()[0].text
b.attr[at.attrib['name']] = vals
# usage
for at in bld.xpath('bldg:usage', namespaces=nsmap):
b.usage = at.text
# measuredHeight
for at in bld.xpath('bldg:measuredHeight', namespaces=nsmap):
b.measuredHeight = at.text
# storeysAboveGround
for at in bld.xpath('bldg:storeysAboveGround', namespaces=nsmap):
b.storeysAboveGround = at.text
# storeysBelowGround
for at in bld.xpath('bldg:storeysBelowGround', namespaces=nsmap):
b.storeysBelowGround = at.text
# address
try: # there are 2 names: 'xAL' and 'xal'..
for at in bld.xpath('bldg:address/core:Address/core:xalAddress/xAL:AddressDetails/xAL:Address', namespaces=nsmap):
b.address = at.text
except lxml.etree.XPathEvalError as e:
for at in bld.xpath('bldg:address/core:Address/core:xalAddress/xal:AddressDetails/xal:Address', namespaces=nsmap):
b.address = at.text
# buildingDetails
for at in bld.xpath('uro:buildingDetails/uro:BuildingDetails', namespaces=nsmap):
for ch in at.getchildren():
tag = ch.tag
tag = tag[ tag.rfind('}')+1: ]
b.buildingDetails[tag] = ch.text
# extendedAttribute
for at in bld.xpath('uro:extendedAttribute/uro:KeyValuePair', namespaces=nsmap):
ch = at.getchildren()
b.extendedAttribute[ch[0].text] = ch[1].text
# lod0RoofEdge
vals = bld.xpath('bldg:lod0RoofEdge/gml:MultiSurface/gml:surfaceMember/gml:Polygon/gml:exterior/gml:LinearRing/gml:posList', namespaces=nsmap)
b.lod0RoofEdge = [str2floats(v).reshape((-1,3)) for v in vals]
# lod1Solid
vals = bld.xpath('bldg:lod1Solid/gml:Solid/gml:exterior/gml:CompositeSurface/gml:surfaceMember/gml:Polygon/gml:exterior/gml:LinearRing/gml:posList', namespaces=nsmap)
b.lod1Solid = [str2floats(v).reshape((-1,3)) for v in vals]
minheight = 0
if options.bHeightZero:
# calc min height
minheight = 10000
for x in b.lod1Solid:
if minheight > np.min(x[:,2]):
minheight = np.min(x[:,2])
if b.storeysBelowGround is not None:
minheight = minheight + (int(b.storeysBelowGround) * _floorheight)
if minheight == 10000:
minheight = 0
for x in b.lod1Solid:
x[:,2] -= minheight
# lod2Solid
# nothing to do for parsing <bldg:lod2Solid>
# lod2MultiSurface : Ground, Roof, Wall
for bb in bld.xpath('bldg:boundedBy/bldg:GroundSurface/bldg:lod2MultiSurface/gml:MultiSurface/gml:surfaceMember/gml:Polygon', namespaces=nsmap):
polyid = '#' + bb.attrib['{'+nsmap['gml']+'}id']
vals = bb.xpath('gml:exterior/gml:LinearRing/gml:posList', namespaces=nsmap)
surf = [str2floats(v).reshape((-1,3)) for v in vals]
if options.bHeightZero:
if minheight == 0:
# calc min height
minheight = 10000
for x in surf:
if minheight > np.min(x[:,2]):
minheight = np.min(x[:,2])
if b.storeysBelowGround is not None:
minheight = minheight + (int(b.storeysBelowGround) * _floorheight)
if minheight == 10000:
minheight = 0
for x in surf:
x[:,2] -= minheight
b.lod2ground[polyid] = surf
app = appParameterizedTexture.search_list( partex, polyid )
if app is not None:
if b.partex.imageURI is None:
b.partex = app
#elif b.partex.imageURI != app.imageURI:
# print('error')
for bb in bld.xpath('bldg:boundedBy/bldg:RoofSurface/bldg:lod2MultiSurface/gml:MultiSurface/gml:surfaceMember/gml:Polygon', namespaces=nsmap):
polyid = '#' + bb.attrib['{'+nsmap['gml']+'}id']
vals = bb.xpath('gml:exterior/gml:LinearRing/gml:posList', namespaces=nsmap)
surf = [str2floats(v).reshape((-1,3)) for v in vals]
if options.bHeightZero:
for x in surf:
x[:,2] -= minheight
b.lod2roof[polyid] = surf
app = appParameterizedTexture.search_list( partex, polyid )
if app is not None:
if b.partex.imageURI is None:
b.partex = app
#elif b.partex.imageURI != app.imageURI:
# print('error')
for bb in bld.xpath('bldg:boundedBy/bldg:WallSurface/bldg:lod2MultiSurface/gml:MultiSurface/gml:surfaceMember/gml:Polygon', namespaces=nsmap):
polyid = '#' + bb.attrib['{'+nsmap['gml']+'}id']
vals = bb.xpath('gml:exterior/gml:LinearRing/gml:posList', namespaces=nsmap)
surf = [str2floats(v).reshape((-1,3)) for v in vals]
if options.bHeightZero:
for x in surf:
x[:,2] -= minheight
b.lod2wall[polyid] = surf
app = appParameterizedTexture.search_list( partex, polyid )
if app is not None:
if b.partex.imageURI is None:
b.partex = app
#elif b.partex.imageURI != app.imageURI:
# print('error')
self.buildings.append(b)
# vertices, triangles
if (not options.bUseLOD2texture) or options.bUseLOD0:
mesh = plmesh()
for b in self.buildings:
if options.bUseLOD2texture and (not options.bUseLOD0):
mesh = plmesh()
if options.bUseLOD0:
# LOD0
vertices, triangles = b.getLOD0polygons()
if vertices is not None and triangles is not None:
vstart = len(mesh.vertices)
mesh.vertices.extend( vertices )
mesh.triangles.extend( triangles + vstart )
elif b.lod2ground or b.lod2roof or b.lod2wall:
# LOD2
if options.bUseLOD2texture:
if b.partex.imageURI is not None:
# convert .tif into .png, because o3d.io.read_image() fails.
mesh.texture_filename = os.path.dirname( self.filename ) + '/' + b.partex.imageURI
img = cv2.imread(mesh.texture_filename)
mesh.texture_filename = options.texturedir + '/' + os.path.basename( mesh.texture_filename ) + '.png'
cv2.imwrite(mesh.texture_filename,img)
# ground
for key, value in b.lod2ground.items():
vertices = [ convertPolarToCartsian( *x ) for x in value[0] ]
res = earcut(np.array(vertices,dtype=np.int).flatten(), dim=3)
if len(res) > 0:
vstart = len(mesh.vertices)
mesh.vertices.extend( vertices )
triangles = np.array(res).reshape((-1,3))
mesh.triangles.extend( triangles + vstart )
# texture
if options.bUseLOD2texture:
if key in b.partex.targets.keys():
mesh.triangle_uvs.extend( [ b.partex.targets[key][0,x] for x in triangles.reshape((-1)) ] )
mesh.triangle_material_ids.extend( [0]*len(triangles) )
                    else: # add dummy uvs and material_ids (the texture cannot be rendered if their counts do not match the number of triangles)
mesh.triangle_uvs.extend( [ np.zeros((2)) for x in range(len(triangles)*3) ] )
mesh.triangle_material_ids.extend( [0]*len(triangles) )
# roof
for key, value in b.lod2roof.items():
vertices = [ convertPolarToCartsian( *x ) for x in value[0] ]
res = earcut(np.array(vertices,dtype=np.int).flatten(), dim=3)
if len(res) > 0:
vstart = len(mesh.vertices)
mesh.vertices.extend( vertices )
triangles = np.array(res).reshape((-1,3))
mesh.triangles.extend( triangles + vstart )
# texture
if options.bUseLOD2texture:
if key in b.partex.targets.keys():
mesh.triangle_uvs.extend( [ b.partex.targets[key][0,x] for x in triangles.reshape((-1)) ] )
mesh.triangle_material_ids.extend( [0]*len(triangles) )
# wall
for key, value in b.lod2wall.items():
vertices = [ convertPolarToCartsian( *x ) for x in value[0] ]
                res = earcut(np.array(vertices,dtype=np.int).flatten(), dim=3)
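                # NOTE: assumed continuation -- the source snippet is truncated here;
                # the following lines mirror the ground/roof branches above.
                if len(res) > 0:
                    vstart = len(mesh.vertices)
                    mesh.vertices.extend( vertices )
                    triangles = np.array(res).reshape((-1,3))
                    mesh.triangles.extend( triangles + vstart )
                    # texture
                    if options.bUseLOD2texture:
                        if key in b.partex.targets.keys():
                            mesh.triangle_uvs.extend( [ b.partex.targets[key][0,x] for x in triangles.reshape((-1)) ] )
                            mesh.triangle_material_ids.extend( [0]*len(triangles) )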
# -*- coding: utf-8 -*-
import random as rn
rn.seed(2)
from numpy.random import seed
seed(2)
from tensorflow import set_random_seed
set_random_seed(2)
import tensorflow as tf
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
import os
import numpy as np
def load_data(name='freq', get_all_special_data=True):
if name == 'freq':
path = 'D:/Python/TAR/dataset/metadata/bags of words'
elif name == 'chi2':
path = 'D:/Python/TAR/chi2_scores/metadata/bags_of_words'
elif name == 'tfidf':
path = 'D:/Python/TAR/tf-idf_scores/metadata/bags_of_words'
else:
raise ValueError
train_negative = path + '/negative'
train_positive = path + '/positive'
test_negative = path + '/negative_test'
test_positive = path + '/positive_test'
special_path = 'D:/Python/TAR/special-data/bags of words'
special_train_negative = special_path + '/negative/'
special_train_positive = special_path + '/positive/'
special_test_negative = special_path + '/negative_test/'
special_test_positive = special_path + '/positive_test/'
#
# load train data
#
train = []
train_X = []
train_S = []
train_y = []
os.chdir(train_negative)
negative_files = os.listdir()
#print('negative train files:', len(negative_files))
for txtfile in negative_files:
with open(txtfile, 'r', encoding='utf8') as file:
vector = file.readlines()
vector = [int(token[:-1]) for token in vector] # remove '\n', convert values to int
special_vector = []
with open(special_train_negative + txtfile, 'r', encoding='utf-8') as sf:
special_vector = sf.readlines()
special_vector = [float(token[:-1]) for token in special_vector]
            if not get_all_special_data:
special_vector = [special_vector[1], special_vector[4], special_vector[5], special_vector[8]]
train.append([np.array(vector), np.array(special_vector), np.array([1, 0])])
os.chdir(train_positive)
positive_files = os.listdir()
#print('positive train files:', len(positive_files))
for txtfile in positive_files:
with open(txtfile, 'r', encoding='utf8') as file:
vector = file.readlines()
vector = [int(token[:-1]) for token in vector] # remove '\n', convert values to int
special_vector = []
with open(special_train_positive + txtfile, 'r', encoding='utf-8') as sf:
special_vector = sf.readlines()
special_vector = [float(token[:-1]) for token in special_vector]
            if not get_all_special_data:
special_vector = [special_vector[1], special_vector[4], special_vector[5], special_vector[8]]
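        # oversample each positive example 5x (the repeated appends below balance the classes)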
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
train = np.array(train)
#np.random.shuffle(train)
# don't shuffle here, shuffle data controlably when necessary
for sample in train:
train_X.append(sample[0])
train_S.append(sample[1])
train_y.append(sample[2])
#
# load test data
#
test = []
test_X = []
test_S = []
test_y = []
os.chdir(test_negative)
negative_files = os.listdir()
#print('negative test files:', len(negative_files))
for txtfile in negative_files:
with open(txtfile, 'r', encoding='utf8') as file:
vector = file.readlines()
vector = [int(token[:-1]) for token in vector] # remove '\n', convert values to int
special_vector = []
with open(special_test_negative + txtfile, 'r', encoding='utf-8') as sf:
special_vector = sf.readlines()
special_vector = [float(token[:-1]) for token in special_vector]
            if not get_all_special_data:
special_vector = [special_vector[1], special_vector[4], special_vector[5], special_vector[8]]
test.append([np.array(vector), np.array(special_vector), np.array([1, 0])])
os.chdir(test_positive)
positive_files = os.listdir()
#print('positive test files:', len(positive_files))
for txtfile in positive_files:
with open(txtfile, 'r', encoding='utf8') as file:
vector = file.readlines()
vector = [int(token[:-1]) for token in vector] # remove '\n', convert values to int
special_vector = []
with open(special_test_positive + txtfile, 'r', encoding='utf-8') as sf:
special_vector = sf.readlines()
special_vector = [float(token[:-1]) for token in special_vector]
            if not get_all_special_data:
special_vector = [special_vector[1], special_vector[4], special_vector[5], special_vector[8]]
test.append([np.array(vector), np.array(special_vector), np.array([0, 1])])
test = np.array(test)
#np.random.shuffle(test)
for sample in test:
test_X.append(sample[0])
test_S.append(sample[1])
test_y.append(sample[2])
#print('len(test_y) =', len(test_y))
    return np.array(train_X), np.array(train_S), np.array(train_y), np.array(test_X), np.array(test_S), np.array(test_y)
from __future__ import annotations
import math
import warnings
from functools import partial
from itertools import count
from typing import Callable, Sequence, Union
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from scipy.stats import chi2
from pykelihood.cached_property import cached_property
from pykelihood.distributions import GPD, Distribution, Exponential
from pykelihood.metrics import (
AIC,
BIC,
Brier_score,
bootstrap,
crps,
opposite_log_likelihood,
qq_l1_distance,
quantile_score,
)
from pykelihood.parameters import ParametrizedFunction
warnings.filterwarnings("ignore")
class Profiler(object):
def __init__(
self,
distribution: Distribution,
data: pd.Series,
score_function: Callable = opposite_log_likelihood,
name: str = "Standard",
inference_confidence: float = 0.99,
single_profiling_param=None,
):
"""
:param distribution: distribution on which the inference is based
:param data: variable of interest
:param score_function: function used for optimisation
:param name: name (optional) of the likelihood if it needs to be compared to other likelihood functions
:param inference_confidence: wanted confidence for intervals
:param fit_chi2: whether the results from the likelihood ratio method must be fitted to a chi2
or a generic chi2 with degree of freedom 1 is used
:param single_profiling_param: parameter that we want to fix to create the profiles based on likelihood
"""
self.name = name
self.distribution = distribution
self.data = data
self.score_function = score_function
self.inference_confidence = inference_confidence
self.single_profiling_param = single_profiling_param
@cached_property
def standard_mle(self):
estimate = self.distribution.fit(self.data)
ll = -opposite_log_likelihood(estimate, self.data)
ll = ll if isinstance(ll, float) else ll[0]
return (estimate, ll)
@cached_property
def optimum(self):
x0 = self.distribution.optimisation_params
estimate = self.distribution.fit_instance(
self.data, score=self.score_function, x0=x0
)
func = -self.score_function(estimate, self.data)
func = func if isinstance(func, float) else func[0]
return (estimate, func)
@cached_property
def profiles(self):
profiles = {}
opt, func = self.optimum
if self.single_profiling_param is not None:
params = [self.single_profiling_param]
else:
params = opt.optimisation_param_dict.keys()
for name, k in opt.optimisation_param_dict.items():
if name in params:
r = float(k)
lb = r - 5 * (10 ** math.floor(math.log10(np.abs(r))))
ub = r + 5 * (10 ** math.floor(math.log10(np.abs(r))))
range = list(np.linspace(lb, ub, 50))
profiles[name] = self.test_profile_likelihood(range, name)
return profiles
def test_profile_likelihood(self, range_for_param, param):
opt, func = self.optimum
profile_ll = []
params = []
for x in range_for_param:
try:
pl = opt.fit_instance(
self.data,
score=self.score_function,
**{param: x},
)
pl_value = -self.score_function(pl, self.data)
pl_value = pl_value if isinstance(pl_value, float) else pl_value[0]
if np.isfinite(pl_value):
profile_ll.append(pl_value)
params.append([p.value for p in pl.flattened_params])
except:
pass
chi2_par = {"df": 1}
lower_bound = func - chi2.ppf(self.inference_confidence, **chi2_par) / 2
filtered_params = pd.DataFrame(
[x + [eval] for x, eval in zip(params, profile_ll) if eval >= lower_bound]
)
cols = list(opt.flattened_param_dict.keys()) + ["score"]
filtered_params = filtered_params.rename(columns=dict(zip(count(), cols)))
return filtered_params
def confidence_interval(self, metric: Callable[[Distribution], float]):
"""
:param metric: function depending on the distribution: it can be one of the parameter (ex: lambda x: x.shape() for a parameter called "shape"),
or a metric relevant to the field of study (ex: the 100-years return level for extreme value analysis by setting lambda x: x.isf(1/100))...
:return: bounds based on parameter profiles for this metric
"""
estimates = []
profiles = self.profiles
if self.single_profiling_param is not None:
params = [self.single_profiling_param]
else:
params = profiles.keys()
for param in params:
columns = list(self.optimum[0].optimisation_param_dict.keys())
result = profiles[param].apply(
lambda row: metric(
self.distribution.with_params({k: row[k] for k in columns}.values())
),
axis=1,
)
estimates.extend(list(result.values))
if len(estimates):
return [np.min(estimates), np.max(estimates)]
else:
return [-np.inf, np.inf]
class DetrendedFluctuationAnalysis(object):
def __init__(
self,
data: pd.DataFrame,
scale_lim: Sequence[int] = None,
scale_step: float = None,
):
"""
:param data: pandas Dataframe, if it contains a column for the day and month, the profiles are normalized
according to the mean for each calendar day averaged over years.
:param scale_lim: limits for window sizes
:param scale_step: steps for window sizes
"""
if not ("month" in data.columns and "day" in data.columns):
print("Will use the total average to normalize the data...")
mean = data["data"].mean()
std = data["data"].std()
data = data.assign(mean=mean).assign(std=std)
else:
mean = (
data.groupby(["month", "day"])
.agg({"data": "mean"})["data"]
.rename("mean")
.reset_index()
)
std = (
data.groupby(["month", "day"])
.agg({"data": "std"})["data"]
.rename("std")
.reset_index()
)
data = data.merge(mean, on=["month", "day"], how="left").merge(
std, on=["month", "day"], how="left"
)
phi = (data["data"] - data["mean"]) / data["std"]
phi = (
phi.dropna()
) # cases where there is only one value for a given day / irrelevant for DFA
self.y = np.cumsum(np.array(phi))
if scale_lim is None:
lim_inf = 10 ** (math.floor(np.log10(len(data))) - 1)
lim_sup = min(
10 ** (math.ceil(np.log10(len(data)))), len(phi)
) # assuming all observations are equally splitted
scale_lim = [lim_inf, lim_sup]
if scale_step is None:
scale_step = 10 ** (math.floor(np.log10(len(data)))) / 2
self.scale_lim = scale_lim
self.scale_step = scale_step
@staticmethod
def calc_rms(x: np.array, scale: int, polynomial_order: int):
"""
windowed Root Mean Square (RMS) with polynomial detrending.
Args:
-----
*x* : numpy.array
one dimensional data vector
*scale* : int
length of the window in which RMS will be calculaed
Returns:
--------
*rms* : numpy.array
RMS data in each window with length len(x)//scale
"""
# making an array with data divided in windows
shape = (x.shape[0] // scale, scale)
X = np.lib.stride_tricks.as_strided(x, shape=shape)
# vector of x-axis points to regression
scale_ax = np.arange(scale)
rms = np.zeros(X.shape[0])
for e, xcut in enumerate(X):
coeff = np.polyfit(scale_ax, xcut, deg=polynomial_order)
xfit = np.polyval(coeff, scale_ax)
# detrending and computing RMS of each window
rms[e] = np.mean((xcut - xfit) ** 2)
return rms
@staticmethod
def trend_type(alpha: float):
if round(alpha, 1) < 1:
if round(alpha, 1) < 0.5:
return "Anti-correlated"
elif round(alpha, 1) == 0.5:
return "Uncorrelated, white noise"
elif round(alpha, 1) > 0.5:
return "Correlated"
elif round(alpha, 1) == 1:
return "Noise, pink noise"
elif round(alpha, 1) > 1:
if round(alpha, 1) < 1.5:
return "Non-stationary, unbounded"
else:
return "Brownian Noise"
def __call__(
self, polynomial_order: int, show=False, ax=None, supplement_title="", color="r"
):
"""
Detrended Fluctuation Analysis - measures power law scaling coefficient
of the given signal *x*.
More details about the algorithm you can find e.g. here:
    Kropp, Jürgen, & <NAME>. 2010. Case Studies. Chap. 8-11, pages 167–244 of: In Extremis:
    Disruptive Events and Trends in Climate and Hydrology. Springer Science & Business Media.
"""
y = self.y
scales = (
np.arange(self.scale_lim[0], self.scale_lim[1], self.scale_step)
).astype(np.int)
fluct = np.zeros(len(scales))
# computing RMS for each window
for e, sc in enumerate(scales):
fluct[e] = np.sqrt(
np.mean(self.calc_rms(y, sc, polynomial_order=polynomial_order))
)
        # at this stage, F^2(s) should behave like s^h(2); taking logs gives a line with slope h(2)
coeff = np.polyfit(np.log(scales), np.log(fluct), 1)
# numpy polyfit returns the highest power first
if show:
import matplotlib
matplotlib.rcParams["text.usetex"] = True
ax = ax or matplotlib.pyplot.gca()
default_title = "Detrended Fluctuation Analysis"
title = (
default_title
if supplement_title == ""
else f"{default_title} {supplement_title}"
)
fluctfit = np.exp(np.polyval(coeff, np.log(scales)))
ax.loglog(scales, fluct, "o", color=color, alpha=0.6)
ax.loglog(
scales,
fluctfit,
color=color,
alpha=0.6,
label=r"DFA-{}, {}: $\alpha$={}".format(
polynomial_order, self.trend_type(coeff[0]), round(coeff[0], 2)
),
)
ax.set_title(title)
ax.set_xlabel(r"$\log_{10}$(time window)")
ax.set_ylabel(r"$\log_{10}$F(t)")
ax.legend(loc="lower right", fontsize="small")
return scales, fluct, coeff[0]
def pettitt_test(data: Union[np.array, pd.DataFrame, pd.Series]):
"""
Pettitt's non-parametric test for change-point detection.
Given an input signal, it reports the likely position of a single switch point along with
the significance probability for location K, approximated for p <= 0.05.
"""
T = len(data)
if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):
X = np.array(data).reshape((len(data), 1))
else:
X = data.reshape((len(data), 1))
vector_of_ones = np.ones([1, len(X)])
matrix_col_X = np.matmul(X, vector_of_ones)
matrix_lines_X = matrix_col_X.T
diff = matrix_lines_X - matrix_col_X
diff_sign = np.sign(diff)
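    # Pettitt statistic U_t = sum_{i<=t, j>t} sign(x_j - x_i), built incrementally from row sums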
U_initial = diff_sign[0, 1:].sum()
sum_of_each_line = diff_sign[1:].sum(axis=1)
cs = sum_of_each_line.cumsum()
U = U_initial + cs
U = list(U)
U.insert(0, U_initial)
loc = np.argmax(np.abs(U))
    K = np.max(np.abs(U))
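    # NOTE: assumed completion -- the source snippet is truncated here. The standard
    # Pettitt approximation of the significance probability for the statistic K is used.
    p = 2 * np.exp((-6 * K ** 2) / (T ** 3 + T ** 2))
    return loc, p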
import cv2, os, sys
import numpy as np
import imutils as iu
import sudoku_solver as solver
class ocrClass:
def __init__(self):
samples = np.loadtxt('ml/generalsamples.data',np.float32)
responses = np.loadtxt('ml/generalresponses.data',np.float32)
responses = responses.reshape((responses.size,1))
#.model uses kNearest to perform OCR
self.model = cv2.ml.KNearest_create()
self.model.train(samples, cv2.ml.ROW_SAMPLE, responses)
def getNumber(self, img):
roi = cv2.resize(img, (25,35))
roismall = roi.reshape((1,875))
roismall = np.float32(roismall)
retval, results, neigh_resp, dists = self.model.findNearest(roismall, 1)
predictedNum = int(results[0][0])
return predictedNum
class imageClass:
def __init__(self):
self.captured = []
#.gray is the grayscale captured image
self.gray = []
        #.thresh is the image after adaptive thresholding
self.thresh = []
#.contours contains information about the contours found in the image
self.contours = []
self.cuttedThresh = []
self.cuttedOrig = []
self.corners = np.array([])
def captureAndCrop(self, img):
height, width = img.shape[:2]
if height > 800 or width > 800:
if height > width:
self.captured = iu.resize(img, height=800)
else:
self.captured = iu.resize(img, width=800)
else:
self.captured = img
self.gray = cv2.cvtColor(self.captured, cv2.COLOR_BGR2GRAY)
#noise removal with gaussian blur
self.gray = cv2.GaussianBlur(self.gray,(5,5),0)
#then do adaptive thresholding
self.thresh = cv2.adaptiveThreshold(self.gray,255,1,1,11,5)
#cv2.imwrite('out/threshSudoku.png', self.thresh)
#find countours in threshold image
_, contours, _ = cv2.findContours(self.thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
maxArea = 0
biggest = None
for i in contours:
area = cv2.contourArea(i)
if area > 40000:
epsilon = 0.1*cv2.arcLength(i,True)
approx = cv2.approxPolyDP(i,epsilon,True)
#cv2.drawContours(self.captured, [i], 0, (0,0,255), 1)
if area > maxArea and len(approx)==4:
maxArea = area
biggest = i
self.corners = approx
# print( area )
if biggest is not None:
pts1 = np.float32(self.rotateCorners(self.corners))
pts2 = np.float32([[0,0],[0,450],[450,0],[450,450]])
M = cv2.getPerspectiveTransform(pts1,pts2)
self.cuttedThresh = cv2.warpPerspective(self.thresh,M,(450,450))
self.cuttedOrig = cv2.warpPerspective(self.captured,M,(450,450))
#cv2.drawContours(self.captured, [biggest], 0, (0,255,0), 3)
#cv2.imwrite('out/contour.png', self.captured)
cv2.imwrite('out/cuttedThresh.png', self.cuttedThresh)
return self.captured
return None
def readSudoku(self):
img = np.zeros([450,450,3],dtype=np.uint8)
sudoku = np.zeros([9,9],dtype=np.uint32)
#thresh = cv2.adaptiveThreshold(self.cutted,255,1,1,3,1)
#morph = cv2.morphologyEx(thresh,cv2.MORPH_ERODE,None,iterations = 0)
_, contours,_ = cv2.findContours(self.cuttedThresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
ocr = ocrClass()
fieldCount = 0
for i in contours:
area = cv2.contourArea(i)
if area > 50:
[x,y,w,h] = cv2.boundingRect(i)
if h > 15 and h < 45 and w > 8 and w < 45:
fieldCount += 1
roi = self.cuttedThresh[y:y+h,x:x+w]
num = ocr.getNumber(roi)
sudox = int((x+(w/2))//50)
sudoy = int((y+(h/2))//50)
sudoku[sudoy][sudox] = num
#cv2.imwrite('out/fields/' + str(num) + '/' + str(fieldCount) +'.png', roi)
#cv2.drawContours(img, [i], 0, (255,255,255), 1)
#cv2.imwrite('out/contours.png', img)
#cv2.imwrite('out/thresh.png', thresh)
#print ("%i numbers recognized"%fieldCount)
#print ("sudoku:\n", sudoku)
return sudoku
def writeSudoku(self, sudoku):
#solutionImg = np.zeros((450, 450, 4), dtype=np.uint8)
solutionImg = cv2.cvtColor(self.cuttedOrig, cv2.COLOR_RGB2RGBA)
#solutionImg = self.cuttedOrig
for y in range(9):
for x in range(9):
num = sudoku[y][x]
if num != 0:
sx = x * 50 + 15
sy = y * 50 + 38
cv2.putText(solutionImg,str(num),(sx,sy), 0 , 1, (0,0,0, 255), 2, 2)
cv2.imwrite("out/onlySolution.png", solutionImg)
pts1 = np.float32(self.rotateCorners(self.corners))
pts2 = np.float32([[0,0],[0,450],[450,0],[450,450]])
M = cv2.getPerspectiveTransform(pts2,pts1)
width, height = self.captured.shape[:2]
solutionImg = cv2.warpPerspective(solutionImg,M,(height,width))
solution = self.captured
        y1, y2 = 0, 0 + solutionImg.shape[0]
        x1, x2 = 0, 0 + solutionImg.shape[1]
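        # alpha-blend the warped solution overlay onto the captured frame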
alpha_s = solutionImg[:, :, 3] / 255.0
alpha_l = 1.0 - alpha_s
for c in range(0, 3):
solution[y1:y2, x1:x2, c] = (alpha_s * solutionImg[:, :, c] +
alpha_l * solution[y1:y2, x1:x2, c])
return solution
def invertSudoku(self, sudoku, solution):
# set all values in the solution which were given in the start sudoku to 0
for row in range(9):
for val in range(9):
if sudoku[row][val] != 0:
solution[row][val] = 0
return solution
def rotateCorners(self, corners):
# rotates the values of corners always in the same order
# top-left, bottom-left, top-right, bottom-right
tl = None # top left
bl = None # bottom left
tr = None # top right
br = None # bottom right
# getting the tl and br by getting the smallest
# and biggest sum of the corner tupel
biggest = 0
smallest = 1000000
rest = []
for corner in corners:
added = corner[0][0] + corner[0][1]
if added > biggest:
biggest = added
br = corner[0]
if added < smallest:
smallest = added
tl = corner[0]
# getting the bl and tr corners
for corner in corners:
            if not np.array_equal(corner[0], br) and not np.array_equal(corner[0], tl):
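                # NOTE: assumed loop body -- the source snippet is truncated here.
                rest.append(corner[0])
        # Assumed completion: split the remaining corners into bottom-left / top-right
        # by x-coordinate and return them in the documented order: tl, bl, tr, br.
        if rest[0][0] < rest[1][0]:
            bl, tr = rest[0], rest[1]
        else:
            bl, tr = rest[1], rest[0]
        return [tl, bl, tr, br]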
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2021 <NAME> and authors of arboEL.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import math
import time
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
import numpy as np
from tqdm import tqdm
import pickle
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components
from special_partition.special_partition import cluster_linking_partition
from collections import defaultdict
import blink.biencoder.data_process_mult as data_process
import blink.candidate_ranking.utils as utils
from blink.common.params import BlinkParser
from blink.biencoder.biencoder import BiEncoderRanker
from IPython import embed
def get_query_nn(knn,
embeds,
index,
q_embed,
searchK=None,
gold_idxs=None,
type_idx_mapping=None):
"""
Parameters
----------
knn : int
the number of nearest-neighbours to return
embeds : ndarray
matrix of embeddings
index : faiss
faiss index of the embeddings
q_embed : ndarray
2-D array containing the query embedding
searchK: int
optional parameter, the exact number of nearest-neighbours to retrieve and score
gold_idxs : array
optional parameter, list of golden cui indexes
type_idx_mapping : array
optional parameter, list mapping type-specific indexes to the indexes of the full dictionary
Returns
-------
nn_idxs : array
nearest neighbour indices for the query, sorted in descending order of scores
scores : array
similarity scores for each nearest neighbour, sorted in descending order
"""
# To accomodate the approximate-nature of the knn procedure, retrieve more samples and then filter down
k = searchK if searchK is not None else max(16, 2*knn)
# Find k nearest neighbours
_, nn_idxs = index.search(q_embed, k)
nn_idxs = nn_idxs.astype(np.int64).flatten()
if type_idx_mapping is not None:
nn_idxs = type_idx_mapping[nn_idxs]
nn_embeds = torch.tensor(embeds[nn_idxs]).cuda()
# Compute query-candidate similarity scores
scores = torch.flatten(
torch.mm(torch.tensor(q_embed).cuda(), nn_embeds.T)).cpu()
# Sort the candidates by descending order of scores
nn_idxs, scores = zip(
*sorted(zip(nn_idxs, scores), key=lambda x: -x[1]))
if gold_idxs is not None:
# Calculate the knn index at which the gold cui is found (-1 if not found)
        for topk, i in enumerate(nn_idxs):
            if i in gold_idxs:
                break
            topk = -1
# Return only the top k neighbours, and the recall index
return np.array(nn_idxs[:knn], dtype=np.int64), np.array(scores[:knn]), topk
# Return only the top k neighbours
return np.array(nn_idxs[:knn], dtype=np.int64), np.array(scores[:knn])
def partition_graph(graph, n_entities, directed, return_clusters=False):
"""
Parameters
----------
graph : dict
object containing rows, cols, data, and shape of the entity-mention joint graph
n_entities : int
number of entities in the dictionary
directed : bool
whether the graph construction should be directed or undirected
return_clusters : bool
flag to indicate if clusters need to be returned from the partition
Returns
-------
partitioned_graph : coo_matrix
partitioned graph with each mention connected to only one entity
clusters : dict
(optional) contains arrays of connected component indices of the graph
"""
rows, cols, data, shape = graph['rows'], graph['cols'], graph['data'], graph['shape']
rows, cols, data = cluster_linking_partition(
rows,
cols,
data,
n_entities,
directed
)
# Construct the partitioned graph
partitioned_graph = coo_matrix(
(data, (rows, cols)), shape=shape)
if return_clusters:
# Get an array of the graph with each index marked with the component label that it is connected to
_, cc_labels = connected_components(
csgraph=partitioned_graph,
directed=directed,
return_labels=True)
# Store clusters of indices marked with labels with at least 2 connected components
unique_cc_labels, cc_sizes = np.unique(cc_labels, return_counts=True)
filtered_labels = unique_cc_labels[cc_sizes >= 2]
clusters = defaultdict(list)
for i, cc_label in enumerate(cc_labels):
if cc_label in filtered_labels:
clusters[cc_label].append(i)
return partitioned_graph, clusters
return partitioned_graph
def analyzeClusters(clusters, dictionary, queries, knn):
"""
Parameters
----------
clusters : dict
contains arrays of connected component indices of a graph
dictionary : ndarray
entity dictionary to evaluate
queries : ndarray
mention queries to evaluate
knn : int
the number of nearest-neighbour mention candidates considered
Returns
-------
results : dict
Contains n_entities, n_mentions, knn_mentions, accuracy, failure[], success[]
"""
n_entities = len(dictionary)
n_mentions = len(queries)
results = {
'n_entities': n_entities,
'n_mentions': n_mentions,
'knn_mentions': knn,
'accuracy': 0,
'failure': [],
'success': []
}
_debug_n_mens_evaluated, _debug_clusters_wo_entities, _debug_clusters_w_mult_entities = 0, 0, 0
print("Analyzing clusters...")
for cluster in clusters.values():
# The lowest value in the cluster should always be the entity
pred_entity_idx = cluster[0]
# Track the graph index of the entity in the cluster
pred_entity_idxs = [pred_entity_idx]
if pred_entity_idx >= n_entities:
# If the first element is a mention, then the cluster does not have an entity
_debug_clusters_wo_entities += 1
continue
pred_entity = dictionary[pred_entity_idx]
pred_entity_cuis = [str(pred_entity['cui'])]
_debug_tracked_mult_entities = False
for i in range(1, len(cluster)):
men_idx = cluster[i] - n_entities
if men_idx < 0:
# If elements after the first are entities, then the cluster has multiple entities
if not _debug_tracked_mult_entities:
_debug_clusters_w_mult_entities += 1
_debug_tracked_mult_entities = True
# Track the graph indices of each entity in the cluster
pred_entity_idxs.append(cluster[i])
# Predict based on all entities in the cluster
pred_entity_cuis += list(set([dictionary[cluster[i]]['cui']]) - set(pred_entity_cuis))
continue
_debug_n_mens_evaluated += 1
men_query = queries[men_idx]
men_golden_cuis = list(map(str, men_query['label_cuis']))
report_obj = {
'mention_id': men_query['mention_id'],
'mention_name': men_query['mention_name'],
'mention_gold_cui': '|'.join(men_golden_cuis),
'mention_gold_cui_name': '|'.join([dictionary[i]['title'] for i in men_query['label_idxs'][:men_query['n_labels']]]),
'predicted_name': '|'.join([d['title'] for d in [dictionary[i] for i in pred_entity_idxs]]),
'predicted_cui': '|'.join(pred_entity_cuis),
}
# Correct prediction
if not set(pred_entity_cuis).isdisjoint(men_golden_cuis):
results['accuracy'] += 1
results['success'].append(report_obj)
# Incorrect prediction
else:
results['failure'].append(report_obj)
results['accuracy'] = f"{results['accuracy'] / float(_debug_n_mens_evaluated) * 100} %"
print(f"Accuracy = {results['accuracy']}")
# Run sanity checks
assert n_mentions == _debug_n_mens_evaluated
assert _debug_clusters_wo_entities == 0
assert _debug_clusters_w_mult_entities == 0
return results
def main(params):
output_path = params["output_path"]
if not os.path.exists(output_path):
os.makedirs(output_path)
logger = utils.get_logger(params["output_path"], 'log-eval')
pickle_src_path = params["pickle_src_path"]
if pickle_src_path is None or not os.path.exists(pickle_src_path):
pickle_src_path = output_path
embed_data_path = params["embed_data_path"]
if embed_data_path is None or not os.path.exists(embed_data_path):
embed_data_path = output_path
# Init model
reranker = BiEncoderRanker(params)
reranker.model.eval()
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = reranker.n_gpu
knn = params["knn"]
use_types = params["use_types"]
data_split = params["data_split"] # Default = "test"
# Load test data
entity_dictionary_loaded = False
test_dictionary_pkl_path = os.path.join(pickle_src_path, 'test_dictionary.pickle')
test_tensor_data_pkl_path = os.path.join(pickle_src_path, 'test_tensor_data.pickle')
test_mention_data_pkl_path = os.path.join(pickle_src_path, 'test_mention_data.pickle')
if os.path.isfile(test_dictionary_pkl_path):
print("Loading stored processed entity dictionary...")
with open(test_dictionary_pkl_path, 'rb') as read_handle:
test_dictionary = pickle.load(read_handle)
entity_dictionary_loaded = True
if os.path.isfile(test_tensor_data_pkl_path) and os.path.isfile(test_mention_data_pkl_path):
print("Loading stored processed test data...")
with open(test_tensor_data_pkl_path, 'rb') as read_handle:
test_tensor_data = pickle.load(read_handle)
with open(test_mention_data_pkl_path, 'rb') as read_handle:
mention_data = pickle.load(read_handle)
else:
test_samples = utils.read_dataset(data_split, params["data_path"])
if not entity_dictionary_loaded:
with open(os.path.join(params["data_path"], 'dictionary.pickle'), 'rb') as read_handle:
test_dictionary = pickle.load(read_handle)
# Check if dataset has multiple ground-truth labels
mult_labels = "labels" in test_samples[0].keys()
if params["filter_unlabeled"]:
# Filter samples without gold entities
test_samples = list(filter(lambda sample: (len(sample["labels"]) > 0) if mult_labels else (sample["label"] is not None), test_samples))
logger.info("Read %d test samples." % len(test_samples))
mention_data, test_dictionary, test_tensor_data = data_process.process_mention_data(
test_samples,
test_dictionary,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
multi_label_key="labels" if mult_labels else None,
context_key=params["context_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
knn=knn,
dictionary_processed=entity_dictionary_loaded
)
print("Saving processed test data...")
if not entity_dictionary_loaded:
with open(test_dictionary_pkl_path, 'wb') as write_handle:
pickle.dump(test_dictionary, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
with open(test_tensor_data_pkl_path, 'wb') as write_handle:
pickle.dump(test_tensor_data, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
with open(test_mention_data_pkl_path, 'wb') as write_handle:
pickle.dump(mention_data, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
# Store test dictionary token ids
test_dict_vecs = torch.tensor(
list(map(lambda x: x['ids'], test_dictionary)), dtype=torch.long)
# Store test mention token ids
test_men_vecs = test_tensor_data[:][0]
n_entities = len(test_dict_vecs)
n_mentions = len(test_tensor_data)
# Values of k to run the evaluation against
knn_vals = [0] + [2**i for i in range(int(math.log(knn, 2)) + 1)]
# Store the maximum evaluation k
max_knn = knn_vals[-1]
time_start = time.time()
# Check if graphs are already built
graph_path = os.path.join(output_path, 'graphs.pickle')
if not params['only_recall'] and os.path.isfile(graph_path):
print("Loading stored joint graphs...")
with open(graph_path, 'rb') as read_handle:
joint_graphs = pickle.load(read_handle)
else:
# Initialize graphs to store mention-mention and mention-entity similarity score edges;
# Keyed on k, the number of nearest mentions retrieved
joint_graphs = {}
for k in knn_vals:
joint_graphs[k] = {
                'rows': np.array([]),
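                # NOTE: assumed completion -- the source snippet is truncated here; the
                # remaining keys mirror the usage in partition_graph() above.
                'cols': np.array([]),
                'data': np.array([]),
                'shape': (n_entities + n_mentions, n_entities + n_mentions)
            }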
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 13 16:00:10 2019
@author: nico
"""
import os
import numpy as np
from scipy import signal as sig
import matplotlib.pyplot as plt
import control
os.system("clear")  # clears the terminal
plt.close("all")    # closes all open figures
num = np.array([1, 0, 0, 0, -1])  # numerator coefficients of H (highest power first)
den = np.array([1, 0, 0, 0, 0])
z, p, k = sig.tf2zpk(num,den)
ww, hh = sig.freqz(num, den)
print("Z =", z, "\n", "P =", p, "\n", "K =", k, "\n")
ww = ww / np.pi
eps = np.finfo(float).eps
import numpy as np
import cv2
import glob
import os
import random
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
from multiprocessing import Pool
def elastic_transform(image, alpha, sigma, alpha_affine, random_state=None):
"""Elastic deformation of images as described in [Simard2003]_ (with modifications).
.. [Simard2003] <NAME>, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
"""
if random_state is None:
random_state = np.random.RandomState(None)
shape = image.shape
shape_size = shape[:2]
# Random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha
dz = np.zeros_like(dx)
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
    indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
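    # NOTE: assumed completion -- the source snippet is truncated here. The canonical
    # implementation samples the displaced coordinates and restores the original shape.
    return map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)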
"""
Determination of the echo top height from radar PPI data using the
Lakshmanan et al. (2013). method
@title: echotop
@author: <NAME> <<EMAIL>>
@date: 26/08/2021
@institution: Monash University
@reference: Lakshmanan et al. (2013), "An Improved Method for Estimating Radar
Echo-Top Height". Weather Forecast. 28, 481–488,
doi:10.1175/WAF-D-12-00084.1.
.. autosummary::
:toctree: generated/
cloud_top_height
grid_cloud_top
"""
import numpy as np
from numba import jit
from scipy.spatial import cKDTree
@jit
def cloud_top_height(
r, azimuth, elevation, st_sweep, ed_sweep, refl, eth_thld=0, noise_thld=-2, min_range=15e3, verbose=False,
):
"""
Estimating Radar Echo-Top Height using the improved method from Lakshmanan
et al. (2013).
Parameters:
===========
r: <nr>
Radar range.
azimuth: <time>
Radar azimuth.
elevation: <time>
Radar elevation.
st_sweep: <nsweep>
Radar sweep start ray index.
ed_sweep: <nsweep>
Radar sweep end ray index.
refl: <time, nr>
Radar reflectivity.
eth_thld: float
Threshold value (e.g., 0 dBZ, 18 dBZ, ...) used to compute the echo top
noise_thld: float
Signal to noise cutoff threshold value.
min_range: float
Minimum range in meter at which the echo top height are computed to
avoid the cone of silence, generally 15 km.
verbose: bool
Print debug messages
Returns:
========
cloudtop: <na, nr>
Cloud top height in meters, dimensions are na: length of the azimuth
array of the first sweep, and nr: length of the input 'r' array.
"""
earth_radius = 6371000
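    # na0: number of rays in the first sweep (= start index of the second sweep)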
na0 = st_sweep[1]
nsweep = len(st_sweep)
cloudtop = np.zeros((na0, len(r))) + np.NaN
ground_range = np.zeros((nsweep, len(r)))
elev_ref = elevation[0]
for i, st in enumerate(st_sweep):
ground_range[i, :] = r * np.cos(np.pi * elevation[st + 1] / 180)
for i in range(1, len(st_sweep)):
st = st_sweep[i]
ed = ed_sweep[i]
elev_ref = elevation[st_sweep[i - 1]]
elev_iter = elevation[st]
st_ref = st_sweep[i - 1]
ed_ref = ed_sweep[i - 1]
if verbose:
print(i, st, ed, elev_iter, elev_ref)
for j in range(na0):
nbeam_ref = np.argmin(np.abs(azimuth[st_ref:ed_ref] - azimuth[j])) + st_ref
nbeam_iter = np.argmin(np.abs(azimuth[st:ed] - azimuth[j])) + st
if np.abs(azimuth[nbeam_ref] - azimuth[nbeam_iter]) > 5:
continue
for k in range(len(r)):
if r[k] < min_range:
continue
gr_ref = ground_range[i - 1, k]
                npos = np.argmin(np.abs(ground_range[i, :] - gr_ref))
"""
Authors:
<NAME> | @scottgallacher-3
<NAME> | @SimonMolinsky
Contributors:
<NAME> | https://github.com/ethmtrgt
"""
import csv
from operator import itemgetter
import warnings
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
class TheoreticalSemivariogram:
"""
Class calculates theoretical semivariogram. Class takes two parameters during initialization:
points_array - (numpy array) analysed points where the last column represents values, typically x, y, value,
empirical_semivariance - (numpy array) semivariance where first row of array represents lags and the second row
represents semivariance's values for a given lag.
Available methods:
- predict() - method predicts value of the unknown point based on the chosen model,
- fit_semivariance() - Method fits experimental points into chosen semivariance model type,
- find_optimal_model() - Method fits experimental points into all available models and choose one with the lowest
error.
Available theoretical models:
- spherical_model(distance, nugget, sill, semivar_range)
- gaussian_model(distance, nugget, sill, semivar_range)
- exponential_model(distance, nugget, sill, semivar_range)
- linear_model(distance, nugget, sill, semivar_range)
Additional methods:
- calculate_base_error(),
- show_experimental_semivariogram() - shows semivariogram which is a part of the class object's instance,
- show_semivariogram() - shows experimental semivariogram with theoretical model (if it was calculated).
"""
def __init__(self, points_array=None, empirical_semivariance=None, verbose=False):
"""
INPUT:
:param points_array: (numpy array) [point x, point y, value] (optional if model parameters are imported)
:param empirical_semivariance: (numpy array) array of pair of lag and semivariance values where
semivariance[:, 0] = array of lags
semivariance[:, 1] = array of lag's values
semivariance[:, 2] = array of number of points in each lag.
(optional if model parameters are imported)
:param verbose: (bool) if True then all messages are printed, otherwise nothing.
"""
self.points_array = points_array
self.empirical_semivariance = empirical_semivariance
self.verbose = verbose
self.theoretical_model = None
self.chosen_model_name = None
self.nugget = None
self.range = None
self.sill = None
self.model_error = None
self.is_weighted = False
# MODELS
@staticmethod
def spherical_model(lags, nugget, sill, semivar_range):
"""
gamma = nugget + sill*[(3/2)*a - (1/2)*(a**3)], 0 <= lag <= range
gamma = nugget + sill, lag > range
gamma = 0, lag == 0
where:
a = lag / range
INPUT:
:param lags: array of lags from empirical semivariance,
:param nugget: scalar,
:param sill: scalar,
:param semivar_range: optimal range calculated by fit_semivariance method.
OUTPUT:
:return: an array of modeled values for given range. Values are calculated based on the spherical model.
"""
a = lags / semivar_range
a1 = 3 / 2 * a
a2 = 1 / 2 * a ** 3
gamma = np.where((lags <= semivar_range),
(nugget + sill * (a1 - a2)),
(nugget + sill))
return gamma
@staticmethod
def exponential_model(lags, nugget, sill, semivar_range):
"""
gamma = nugget + sill*[1 - exp(-lag/range)], distance > 0
gamma = 0, lag == 0
INPUT:
:param lags: array of lags from empirical semivariance,
:param nugget: scalar,
:param sill: scalar,
:param semivar_range: optimal range calculated by fit_semivariance method.
OUTPUT:
:return: an array of modeled values for given range. Values are calculated based on the exponential model.
"""
try:
gamma = nugget + sill * (1 - np.exp(-lags / semivar_range))
except TypeError:
lags = lags.astype(float)
semivar_range = float(semivar_range)
gamma = nugget + sill * (1 - np.exp(-lags / semivar_range))
return gamma
@staticmethod
def linear_model(lags, nugget, sill, semivar_range):
"""
gamma = nugget + sill*(lag/range), 0 <= lag <= range
gamma = nugget + sill, lag > range
gamma = 0, lag == 0
INPUT:
:param lags: array of lags from empirical semivariance,
:param nugget: scalar,
:param sill: scalar,
:param semivar_range: optimal range calculated by fit_semivariance method.
OUTPUT:
:return: an array of modeled values for given range. Values are calculated based on the linear model.
"""
gamma = np.where((lags <= semivar_range),
(nugget + sill * (lags / semivar_range)),
(nugget + sill))
return gamma
@staticmethod
def gaussian_model(lags, nugget, sill, semivar_range):
"""
gamma = nugget + sill*[1 - exp(-1*(lag**2 / range**2))], lag > 0
gamma = 0, lag == 0
INPUT:
:param lags: array of ranges from empirical semivariance,
:param nugget: scalar,
:param sill: scalar,
:param semivar_range: optimal range calculated by fit_semivariance method.
OUTPUT:
:return: an array of modeled values for given range. Values are calculated based on the gaussian model.
"""
gamma = nugget + sill * (1 - np.exp(-1*(lags ** 2 / semivar_range ** 2)))
if lags[0] == 0:
gamma[0] = 0
return gamma
@staticmethod
def power_model(lags, nugget, sill, semivar_range):
"""
gamma = nugget + sill*[1 - exp(lag**2 / range**2)], lag > 0
gamma = 0, lag == 0
INPUT:
:param lags: array of ranges from empirical semivariance,
:param nugget: scalar,
:param sill: scalar,
:param semivar_range: optimal range calculated by fit_semivariance method.
OUTPUT:
:return: an array of modeled values for given range. Values are calculated based on the power model.
"""
gamma = nugget + sill * (1 - np.exp((lags ** 2 / semivar_range ** 2)))
if lags[0] == 0:
gamma[0] = 0
return gamma
@staticmethod
def cubic_model(lags, nugget, sill, semivar_range):
"""
gamma = nugget + sill*[7*(a**2) - 8.75*(a**3) + 3.5*(a**5) - 0.75*(a**7)], lag < range
gamma = nugget + sill, lag > range
gamma = 0, lag == 0
where:
a = lag / range
INPUT:
:param lags: array of lags from empirical semivariance,
:param nugget: scalar,
:param sill: scalar,
:param semivar_range: optimal range calculated by fit_semivariance method.
OUTPUT:
:return: an array of modeled values for given range. Values are calculated based on the cubic model.
"""
a = lags / semivar_range
a1 = 7 * a ** 2
a2 = -8.75 * a ** 3
a3 = 3.5 * a ** 5
a4 = -0.75 * a ** 7
gamma = np.where((lags < semivar_range), nugget + sill * (a1 + a2 + a3 + a4), nugget + sill)
if lags[0] == 0:
gamma[0] = 0
return gamma
@staticmethod
def circular_model(lags, nugget, sill, semivar_range):
##### NOTE: found two competing model formulae for the circular model
##### 1st one doesn't seem to work with the test data; but 2nd one does
##### Sources added in docstring, further comparison may be needed
##### (DELETE AFTER REVIEW)
"""
gamma = nugget + sill*[1 - (2/np.pi * np.arccos(a)) + np.sqrt(1 - (lag ** 2)/ (range ** 2) )], 0 < lag <= range
OR gamma = nugget + (2/np.pi)*sill*[a * np.sqrt(1 - a ** 2) + np.arcsin(a)], 0 < lag <= range
gamma = 0, lag == 0
where:
a = lag / range
(Model 1 Source:
https://desktop.arcgis.com/en/arcmap/10.3/tools/spatial-analyst-toolbox/how-kriging-works.htm#GUID-94A34A70-DBCF-4B23-A198-BB50FB955DC0)
(Model 2 Source:
https://pro.arcgis.com/en/pro-app/latest/tool-reference/spatial-analyst/how-kriging-works.htm#GUID-94A34A70-DBCF-4B23-A198-BB50FB955DC0)
INPUT:
:param lags: array of ranges from empirical semivariance,
:param nugget: scalar,
:param sill: scalar,
:param semivar_range: optimal range calculated by fit_semivariance method.
OUTPUT:
:return: an array of modeled values for given range. Values are calculated based on the circular model.
"""
# TODO: check conditions:
# apparently, even using np.where uncovers invalid values in the arccos and square root
# but as long as lag <= range this shouldn't happen
# use np.clip on the arrays to be passed
a = lags / semivar_range
# use np.clip to limit range of values passed into np.arccos and np.sqrt
# gamma = np.where((lags <= semivar_range),
# (nugget + sill*(1 - 2/np.pi * np.arccos(np.clip(a, -1, 1)) *
# np.sqrt(1 - np.clip(a**2, -1, 1))) ),
# (nugget + sill))
# second formula found which seems to fit better, and looks as expected
gamma = nugget + (2/np.pi) * sill*(a * np.sqrt(1 - np.clip(a**2, -1, 1)) + np.arcsin(np.clip(a, -1, 1)))
if lags[0] == 0:
gamma[0] = 0
return gamma
def fit_semivariance(self, model_type, number_of_ranges=16, number_of_sill_ranges=16):
"""
Method fits experimental points into chosen semivariance model type.
INPUT:
:param model_type: (str) 'exponential', 'gaussian', 'linear', 'spherical',
:param number_of_ranges: (int) deafult = 16. Used to create an array of equidistant ranges between minimal
range of empirical semivariance and maximum range of empirical semivariance,
:param number_of_sill_ranges: (int) default=16, number of sill ranges to find the optimal one in a grid search.
OUTPUT:
:return: (model_type, model parameters)
"""
# model
models = {
'spherical': self.spherical_model,
'exponential': self.exponential_model,
'linear': self.linear_model,
'gaussian': self.gaussian_model
}
model = models[model_type]
self.chosen_model_name = model_type
self.theoretical_model = model
# sill
variance = np.var(self.points_array[:, -1])
minsill = 0.3 * variance
maxsill = 2 * variance
sills = np.linspace(minsill, maxsill, number_of_sill_ranges)
# nugget
if self.empirical_semivariance[0][0] != 0:
nugget = 0
else:
nugget = self.empirical_semivariance[0][1]
# range
minrange = self.empirical_semivariance[:, 0][1]
maxrange = self.empirical_semivariance[:, 0][-1]
ranges = np.linspace(minrange, maxrange, number_of_ranges)
optimal_range, optimal_sill = self.calculate_range_and_sill(model, ranges, sills, nugget)
# output model
self.nugget = nugget
self.sill = optimal_sill
self.range = optimal_range
# model error
self.model_error = self.calculate_model_error(model, self.nugget, self.sill, self.range)
return model_type
def find_optimal_model(self, weighted=False, number_of_ranges=16, number_of_sill_ranges=16):
"""
Method fits experimental points into all available models and choose one with the lowest error.
INPUT:
:param weighted: (bool) default=False. If True then each lag is weighted by:
sqrt(N(h))/gamma_{exp}(h)
where:
- N(h) - number of point pairs in a given range, gamma_{exp}(h) - value of experimental semivariogram for h.
:param number_of_ranges: (int) default=16. Used to create an array of equidistant ranges between minimal range
of empirical semivariance and maximum range of empirical semivariance,
:param number_of_sill_ranges: (int) default=16, number of sill ranges to find the optimal one in a grid search.
"""
if weighted:
self.is_weighted = True
# models
models = {
'spherical': self.spherical_model,
'exponential': self.exponential_model,
'linear': self.linear_model,
}
# calculate base error for a flat line
base_error = self.calculate_base_error()
# sill
variance = np.var(self.points_array[:, -1])
minsill = 0.3 * variance
maxsill = 2 * variance
sills = np.linspace(minsill, maxsill, number_of_sill_ranges)
# nugget
if self.empirical_semivariance[0][0] != 0:
nugget = 0
else:
nugget = self.empirical_semivariance[0][1]
# range
minrange = self.empirical_semivariance[:, 0][1]
maxrange = self.empirical_semivariance[:, 0][-1]
ranges = np.linspace(minrange, maxrange, number_of_ranges)
# Calculate model errors
model_errors = [('Linear (LS) reference model', base_error, None)]
for model in models:
optimal_range, optimal_sill = self.calculate_range_and_sill(models[model], ranges, sills, nugget)
# output model
model_error = self.calculate_model_error(models[model], nugget, optimal_sill, optimal_range)
model_errors.append((model, model_error, [nugget, optimal_sill, optimal_range]))
if self.verbose:
print('Model: {}, error value: {}'.format(model, model_error))
# Select the best model
sorted_errors = sorted(model_errors, key=itemgetter(1))
if sorted_errors[0][0] == 'Linear (LS) reference model':
# This is unlikely case when error estimated as the squared distance between extrapolated values and
# x axis is smaller than models' errors
model_name = sorted_errors[1][0]
model_error = sorted_errors[1][1]
model_params = sorted_errors[1][2]
warning_msg = 'WARNING: linear model fitted to the experimental variogram is better than the core models!'
warnings.warn(warning_msg)
if self.verbose:
print('Chosen model: {}, with value of: {}.'.format(
model_name, model_error
))
else:
model_name = sorted_errors[0][0]
model_error = sorted_errors[0][1]
model_params = sorted_errors[0][2]
if self.verbose:
print('Chosen model: {}, with value: {}.'.format(
model_name, model_error
))
self.theoretical_model = models[model_name]
self.nugget = model_params[0]
self.sill = model_params[1]
self.range = model_params[2]
self.chosen_model_name = model_name
self.model_error = model_error
return model_name
def calculate_range_and_sill(self, model, possible_ranges, possible_sills, nugget):
errors = []
ranges_and_sills = []
for r in possible_ranges:
for s in possible_sills:
x = (self.empirical_semivariance[:, 1] - model(self.empirical_semivariance[:, 0], nugget, s, r))
x = x * x
errors.append(np.mean(x))
ranges_and_sills.append((r, s))
best_pos = np.argmin(errors)
opt_range, opt_sill = ranges_and_sills[best_pos]
return opt_range, opt_sill
def calculate_range(self, model, ranges, nugget, sill):
# TODO: deprecated method
errors = []
for r in ranges:
x = (self.empirical_semivariance[:, 1] - model(self.empirical_semivariance[:, 0], nugget, sill, r))
x = x ** 2
errors.append(np.mean(x))
range_pos = np.argmin(errors)
optimal_rg = ranges[range_pos]
return optimal_rg
def calculate_values(self):
output_model = self.theoretical_model(self.empirical_semivariance[:, 0],
self.nugget,
self.sill,
self.range)
return output_model
@staticmethod
def _curve_fit_function(x, a, b):
"""
Method fits data into a 1st order polynomial curve where:
y = a * x + b
INPUT:
:param a: number or numpy array,
:param b: number or numpy array,
:param x: number or numpy array.
OUTPUT:
:return: y -> a * x + b | number or numpy array.
"""
y = a * x + b
return y
def _get_weights(self):
"""
Method creates weights based on the lags for each semivariogram point
OUTPUT:
:returns: (numpy array)
"""
nh = np.sqrt(self.empirical_semivariance[:, 2])
vals = self.empirical_semivariance[:, 1]
nh_divided_by_vals = np.divide(nh,
vals,
out=np.zeros_like(nh),
where=vals != 0)
return nh_divided_by_vals
def calculate_base_error(self):
"""
Method calculates base error as the difference between the least squared model
of experimental semivariance and the experimental semivariance points.
OUTPUT:
:returns: (float) mean squared difference error
"""
popt, _pcov = curve_fit(self._curve_fit_function,
self.empirical_semivariance[:, 0],
self.empirical_semivariance[:, 1])
a, b = popt
y = self._curve_fit_function(self.empirical_semivariance[:, 0],
a, b)
error = np.sqrt((self.empirical_semivariance[:, 1] - y) ** 2)
if not self.is_weighted:
mean_error = np.mean(error)
return mean_error
else:
weights = self._get_weights()
mean_error = np.mean(weights * error)
return mean_error
def calculate_model_error(self, model, nugget, sill, semivar_range):
"""
Function calculates error between specific models and experimental curve.
OUTPUT:
:returns: (float) mean squared difference between model and experimental variogram.
"""
error = np.sqrt((self.empirical_semivariance[:, 1] - model(self.empirical_semivariance[:, 0],
nugget,
sill,
semivar_range)) ** 2)
if not self.is_weighted:
return np.mean(error)
else:
weights = self._get_weights()
            return np.mean(weights * error)
import numpy as np
from mountain_scooter import MountainScooter
from Particle import Particle
np.random.seed(71)