"""
Simulation tools for generating fake images
"""
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
def rotate_CD_matrix(cd, pa_aper):
"""Rotate CD matrix
Parameters
----------
cd: (2,2) array
CD matrix
pa_aper: float
Position angle, in degrees E from N, of y axis of the detector
Returns
-------
cd_rot: (2,2) array
Rotated CD matrix
Comments
--------
`astropy.wcs.WCS.rotateCD` doesn't work for non-square pixels in that it
doesn't preserve the pixel scale! The bug seems to come from the fact
that `rotateCD` assumes a transposed version of its own CD matrix.
For example:
    >>> import numpy as np
    >>> import astropy.wcs as pywcs
    >>>
    >>> ## Nominal rectangular WFC3/IR pixel
    >>> cd_wfc3 = np.array([[2.35945978e-05, 2.62448998e-05],
    ...                     [2.93050803e-05, -2.09858771e-05]])
    >>>
    >>> ## Square pixel
    >>> cd_square = np.array([[0.1/3600., 0], [0, 0.1/3600.]])
    >>>
    >>> for cd, label in zip([cd_wfc3, cd_square], ['WFC3/IR', 'Square']):
    ...     wcs = pywcs.WCS()
    ...     wcs.wcs.cd = cd
    ...     wcs.rotateCD(45.)
    ...     print('%s pixel: pre=%s, rot=%s' % (label,
    ...           np.sqrt((cd**2).sum(axis=0))*3600,
    ...           np.sqrt((wcs.wcs.cd**2).sum(axis=0))*3600))
    WFC3/IR pixel: pre=[ 0.1354 0.121 ], rot=[ 0.1282 0.1286]
    Square pixel: pre=[ 0.1 0.1], rot=[ 0.1 0.1]
"""
rad = np.deg2rad(-pa_aper)
mat = np.zeros((2,2))
mat[0,:] = np.array([np.cos(rad),-np.sin(rad)])
mat[1,:] = np.array([np.sin(rad),np.cos(rad)])
cd_rot = np.dot(mat, cd)
return cd_rot
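def _check_rotate_cd_pixel_scale():
    """Illustrative sketch (not part of the original module): the per-axis
    pixel scale is the column norm of the CD matrix, and `rotate_CD_matrix`
    should leave it unchanged because the rotation matrix is orthogonal."""
    cd = np.array([[2.35945978e-05, 2.62448998e-05],
                   [2.93050803e-05, -2.09858771e-05]])
    cd_rot = rotate_CD_matrix(cd, 45.)
    pre = np.sqrt((cd**2).sum(axis=0))*3600
    post = np.sqrt((cd_rot**2).sum(axis=0))*3600
    assert np.allclose(pre, post)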
def niriss_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
filter='F150W', grism='GR150R'):
"""Make JWST/NIRISS image header
Parameters
----------
ra, dec: float, float
Coordinates of the center of the image
pa_aper: float
Position angle of the y-axis of the detector
filter: str
Blocking filter to use.
grism: str
Grism to use
Returns
--------
h: astropy.io.fits.Header
FITS header with appropriate keywords
wcs: astropy.wcs.WCS
WCS specification (computed from keywords in `h`).
Comments
--------
NIRISS: 0.065"/pix, requires filter & grism specification
"""
naxis = 2048, 2048
crpix = 1024, 1024
cd = np.array([[ -0.0658, 0], [0, 0.0654]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' %(i+1)] = naxis[i]
h['CRPIX%d' %(i+1)] = crpix[i]
h['CDELT%d' %(i+1)] = 1.0
for j in range(2):
h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]
### Backgrounds
# http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf
bg = {'F090W':0.50, 'F115W':0.47, 'F140M':0.23, 'F150W':0.48, 'F158M':0.25, 'F200W':0.44}
h['BACKGR'] = bg[filter], 'Total, e/s'
h['FILTER'] = filter
h['INSTRUME'] = 'NIRISS'
    h['READN'] = 6, 'Rough, per pixel per 1 ks exposure'  # e/pix per exposure
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
if grism == 'GR150R':
h['GRISM'] = 'GR150R', 'Spectral trace along X'
else:
h['GRISM'] = 'GR150C', 'Spectral trace along Y'
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
def nircam_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
filter='F444W', grism='DFSR'):
"""Make JWST/NIRCAM image header
Parameters
----------
ra, dec: float, float
Coordinates of the center of the image
pa_aper: float
Position angle of the y-axis of the detector
filter: str
Blocking filter to use.
grism: str
Grism to use
Returns
--------
h: astropy.io.fits.Header
FITS header with appropriate keywords
wcs: astropy.wcs.WCS
WCS specification (computed from keywords in `h`).
Comments
--------
NIRCAM, 0.0648"/pix, requires filter specification
"""
naxis = 2048, 2048
crpix = 1024, 1024
cd = np.array([[ -0.0648, 0], [0, 0.0648]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' %(i+1)] = naxis[i]
h['CRPIX%d' %(i+1)] = crpix[i]
h['CDELT%d' %(i+1)] = 1.0
for j in range(2):
h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]
### Backgrounds
# http://www.stsci.edu/jwst/instruments/niriss/software-tools/wfss-simulations/niriss-wfss-cookbook.pdf
bg = {'F277W':0.30, 'F356W':0.90, 'F444W': 3.00, 'F322W2':1.25, 'F430M':0.65, 'F460M':0.86, 'F410M':0.5} # F410M is a hack, no number
h['BACKGR'] = bg[filter], 'Total, e/s'
h['FILTER'] = filter
h['INSTRUME'] = 'NIRCam'
    h['READN'] = 9, 'Rough, per pixel per 1 ks exposure'  # e/pix per exposure
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
if grism == 'DFSR':
h['GRISM'] = 'DFSR', 'Spectral trace along X'
else:
h['GRISM'] = 'DFSC', 'Spectral trace along Y'
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
def wfc3ir_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589,
flt='ibhj34h6q_flt.fits', filter='G141'):
"""Make HST/WFC3-IR image header
Parameters
----------
ra, dec: float, float
Coordinates of the center of the image
pa_aper: float
Position angle of the y-axis of the detector
flt: str
Filename of a WFC3/IR FLT file that will be used to provide the
SIP geometric distortion keywords.
filter: str
Grism/filter to use.
Returns
--------
h: astropy.io.fits.Header
FITS header with appropriate keywords
wcs: astropy.wcs.WCS
WCS specification (computed from keywords in `h`).
Comments
--------
WFC3 IR, requires reference FLT file for the SIP header
"""
import numpy as np
import astropy.io.fits as pyfits
import astropy.wcs as pywcs
im = pyfits.open(flt)
wcs = pywcs.WCS(im[1].header, relax=True)
thet0 = np.arctan2(im[1].header['CD2_2'], im[1].header['CD2_1'])/np.pi*180
wcs.wcs.crval = np.array([ra, dec])
### Rotate the CD matrix
theta = im[1].header['PA_APER'] - pa_aper
cd_rot = rotate_CD_matrix(wcs.wcs.cd, theta)
wcs.wcs.cd = cd_rot
h = wcs.to_header(relax=True)
for i in [1,2]:
for j in [1,2]:
h['CD%d_%d' %(i,j)] = h['PC%d_%d' %(i,j)]
h.remove('PC%d_%d' %(i,j))
h['BACKGR'] = 1.
h['FILTER'] = filter
h['INSTRUME'] = 'WFC3'
h['READN'] = im[0].header['READNSEA']
h['NAXIS1'] = h['NAXIS2'] = 1014
h['DETECTOR'] = 'IR'
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
return h, wcs
def wfirst_header(ra=53.1592277508136, dec=-27.782056346146, pa_aper=128.589, naxis=(4096,4096)):
"""Make WFIRST WFI header
Parameters
----------
ra, dec: float, float
Coordinates of the center of the image
pa_aper: float
Position angle of the y-axis of the detector
filter: str
Blocking filter to use.
naxis: (int,int)
Image dimensions
Returns
--------
h: astropy.io.fits.Header
FITS header with appropriate keywords
wcs: astropy.wcs.WCS
WCS specification (computed from keywords in `h`).
Comments
--------
WFIRST GRS Grism
Current aXe config file has no field dependence, so field size can be
anything you want in `naxis`.
"""
#naxis = 2048, 2048
crpix = naxis[0]/2., naxis[0]/2.
cd = np.array([[ -0.11, 0], [0, 0.11]])/3600.
cd_rot = rotate_CD_matrix(cd, pa_aper)
h = pyfits.Header()
h['CRVAL1'] = ra
h['CRVAL2'] = dec
h['WCSAXES'] = 2
h['CTYPE1'] = 'RA---TAN'
h['CTYPE2'] = 'DEC--TAN'
for i in range(2):
h['NAXIS%d' %(i+1)] = naxis[i]
h['CRPIX%d' %(i+1)] = crpix[i]
h['CDELT%d' %(i+1)] = 1.0
for j in range(2):
h['CD%d_%d' %(i+1, j+1)] = cd_rot[i,j]
h['BACKGR'] = 0.17+0.49, 'Total, e/s SDT Report A-1'
h['FILTER'] = 'GRS', 'WFIRST grism'
h['INSTRUME'] = 'WFIRST'
    h['READN'] = 17, 'SDT report Table 3-3'  # e/pix per exposure
h['PHOTFLAM'] = 1.
h['PHOTPLAM'] = 1.
wcs = pywcs.WCS(h)
h['EXTVER'] = 1
return h, wcs
def make_fake_image(header, output='direct.fits', background=None, exptime=1.e4, nexp=10):
"""Use the header from NIRISS, WFC3/IR or WFIRST and make an 'FLT' image that `grizli` can read as a reference.
Parameters
----------
header: astropy.io.fits.Header
Header created by one of the generating functions, such as
`niriss_header`.
output: str
Filename of the output FITS file. Will have extensions 'SCI', 'ERR',
and 'DQ'. The 'ERR' extension is populated with a read-noise +
background error model using
>>> var = nexp*header['READN'] + background*exptime
The 'SCI' extension is filled with gaussian deviates with standard
deviation `sqrt(var)`.
The 'DQ' extension is filled with (int) zeros.
background: None or float
Background value to use for sky noise. If None, then read from
`header['BACKGR']`.
exptime: float
Exposure time to use for background sky noise.
nexp: int
Number of exposures to use for read noise.
Returns
-------
Nothing; outputs saved in `output` FITS file.
"""
hdu = pyfits.HDUList()
header['EXPTIME'] = exptime
header['NEXP'] = nexp
header['BUNIT'] = 'ELECTRONS/S'
hdu.append(pyfits.PrimaryHDU(header=header))
naxis = (header['NAXIS1'], header['NAXIS2'])
for name, dtype in zip(['SCI', 'ERR', 'DQ'],
[np.float32, np.float32, np.int32]):
hdu.append(pyfits.ImageHDU(header=header,
data=np.zeros(np.array(naxis).T,
dtype=dtype), name=name))
    if background is None:
background = header['BACKGR']
header['BACKGR'] = background
### Simple error model of read noise and sky background
var = nexp*header['READN'] + background*exptime
### electrons / s
    rms = np.sqrt(var)
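    ### Hedged completion sketch: the body is truncated here in the source.
    ### Following the docstring, the 'ERR' extension holds the rms error
    ### (converted to e/s to match BUNIT), 'SCI' holds Gaussian deviates with
    ### that rms, 'DQ' stays zero, and everything is written to `output`.
    hdu['ERR'].data += rms/exptime
    hdu['SCI'].data = np.random.normal(size=np.array(naxis).T)*rms/exptime
    hdu.writeto(output, overwrite=True)
    ### e.g.: h, wcs = niriss_header(); make_fake_image(h, 'niriss_direct.fits')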
import numpy as np
from holoviews.core import (HoloMap, GridSpace, Layout, Empty, Dataset,
NdOverlay, DynamicMap, Dimension)
from holoviews.element import Curve, Image, Points, Histogram
from holoviews.streams import Stream
from .testplot import TestBokehPlot, bokeh_renderer
try:
    from bokeh.layouts import Column, Row
    from bokeh.models import Div, ToolbarBox, GlyphRenderer, Tabs, Panel, Spacer, GridBox
    from bokeh.plotting import Figure
except ImportError:
    pass
class TestLayoutPlot(TestBokehPlot):
def test_layout_update_visible(self):
hmap = HoloMap({i: Curve(np.arange(i), label='A') for i in range(1, 3)})
hmap2 = HoloMap({i: Curve(np.arange(i), label='B') for i in range(3, 5)})
plot = bokeh_renderer.get_plot(hmap+hmap2)
subplot1, subplot2 = [p for k, p in sorted(plot.subplots.items())]
subplot1 = subplot1.subplots['main']
subplot2 = subplot2.subplots['main']
self.assertTrue(subplot1.handles['glyph_renderer'].visible)
self.assertFalse(subplot2.handles['glyph_renderer'].visible)
plot.update((4,))
self.assertFalse(subplot1.handles['glyph_renderer'].visible)
self.assertTrue(subplot2.handles['glyph_renderer'].visible)
def test_layout_title(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
plot = bokeh_renderer.get_plot(hmap1+hmap2)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = ('<span style="color:black;font-family:Arial;font-style:bold;'
'font-weight:bold;font-size:12pt">Default: 0</span>')
self.assertEqual(title.text, text)
def test_layout_title_fontsize(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
layout = Layout([hmap1, hmap2]).opts(plot=dict(fontsize={'title': '12pt'}))
plot = bokeh_renderer.get_plot(layout)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = ('<span style="color:black;font-family:Arial;font-style:bold;'
'font-weight:bold;font-size:12pt">Default: 0</span>')
self.assertEqual(title.text, text)
def test_layout_title_show_title_false(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
layout = Layout([hmap1, hmap2]).opts(plot=dict(show_title=False))
plot = bokeh_renderer.get_plot(layout)
self.assertTrue('title' not in plot.handles)
def test_layout_title_update(self):
hmap1 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
hmap2 = HoloMap({a: Image(np.random.rand(10,10)) for a in range(3)})
plot = bokeh_renderer.get_plot(hmap1+hmap2)
plot.update(1)
title = plot.handles['title']
self.assertIsInstance(title, Div)
text = ('<span style="color:black;font-family:Arial;font-style:bold;'
'font-weight:bold;font-size:12pt">Default: 1</span>')
self.assertEqual(title.text, text)
def test_layout_gridspaces(self):
layout = (GridSpace({(i, j): Curve(range(i+j)) for i in range(1, 3)
for j in range(2,4)}) +
GridSpace({(i, j): Curve(range(i+j)) for i in range(1, 3)
for j in range(2,4)}) +
Curve(range(10))).cols(2)
layout_plot = bokeh_renderer.get_plot(layout)
plot = layout_plot.state
# Unpack until getting down to two rows
self.assertIsInstance(plot, Column)
self.assertEqual(len(plot.children), 2)
toolbar, grid = plot.children
self.assertIsInstance(toolbar, ToolbarBox)
self.assertIsInstance(grid, GridBox)
self.assertEqual(len(grid.children), 3)
(col1, _, _), (col2, _, _), (fig, _, _) = grid.children
self.assertIsInstance(col1, Column)
self.assertIsInstance(col2, Column)
grid1 = col1.children[0]
grid2 = col2.children[0]
# Check the row of GridSpaces
self.assertEqual(len(grid1.children), 3)
_, (col1, _, _), _ = grid1.children
self.assertIsInstance(col1, Column)
inner_grid1 = col1.children[0]
self.assertEqual(len(grid2.children), 3)
_, (col2, _, _), _ = grid2.children
self.assertIsInstance(col2, Column)
inner_grid2 = col2.children[0]
for grid in [inner_grid1, inner_grid2]:
self.assertEqual(len(grid.children), 4)
(gfig1, _, _), (gfig2, _, _), (gfig3, _, _), (gfig4, _, _) = grid.children
self.assertIsInstance(gfig1, Figure)
self.assertIsInstance(gfig2, Figure)
self.assertIsInstance(gfig3, Figure)
self.assertIsInstance(gfig4, Figure)
def test_layout_instantiate_subplots(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = bokeh_renderer.get_plot(layout)
positions = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
def test_layout_instantiate_subplots_transposed(self):
layout = (Curve(range(10)) + Curve(range(10)) + Image(np.random.rand(10,10)) +
Curve(range(10)) + Curve(range(10)))
plot = bokeh_renderer.get_plot(layout(plot=dict(transpose=True)))
positions = [(0, 0), (0, 1), (1, 0), (2, 0), (3, 0)]
self.assertEqual(sorted(plot.subplots.keys()), positions)
def test_empty_adjoint_plot(self):
adjoint = Curve([0,1,1,2,3]) << Empty() << Curve([0,1,1,0,1])
plot = bokeh_renderer.get_plot(adjoint)
adjoint_plot = plot.subplots[(0, 0)]
self.assertEqual(len(adjoint_plot.subplots), 3)
grid = plot.state.children[1]
(f1, _, _), (f2, _, _), (s1, _, _) = grid.children
self.assertIsInstance(s1, Spacer)
self.assertEqual(s1.width, 0)
self.assertEqual(s1.height, 0)
self.assertEqual(f1.plot_height, f2.plot_height)
def test_layout_plot_with_adjoints(self):
layout = (Curve([]) + Curve([]).hist()).cols(1)
plot = bokeh_renderer.get_plot(layout)
toolbar, grid = plot.state.children
self.assertIsInstance(toolbar, ToolbarBox)
self.assertIsInstance(grid, GridBox)
for (fig, _, _) in grid.children:
self.assertIsInstance(fig, Figure)
self.assertTrue([len([r for r in f.renderers if isinstance(r, GlyphRenderer)])
for (f, _, _) in grid.children], [1, 1, 1])
def test_layout_plot_tabs_with_adjoints(self):
layout = (Curve([]) + Curve([]).hist()).options(tabs=True)
plot = bokeh_renderer.get_plot(layout)
self.assertIsInstance(plot.state, Tabs)
panel1, panel2 = plot.state.tabs
self.assertIsInstance(panel1, Panel)
self.assertIsInstance(panel2, Panel)
self.assertEqual(panel1.title, 'Curve I')
self.assertEqual(panel2.title, 'AdjointLayout I')
def test_layout_shared_source_synced_update(self):
        hmap = HoloMap({i: Dataset({chr(65+j): np.random.rand(i+2)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 2 11:08:09 2020
@author: alvarezguido
GITHUB: https://github.com/alvarezguido
"""
"""
SYNOPSIS
----
----
-----
"""
import simpy
import random
import numpy as np
import math
#import sys
#import re
import matplotlib.pyplot as plt
#import os
#import operator
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import Axes3D
#import PIL
import random
import re
import os
import datetime
import sys
name = "LT"
mode_debug = 0
if not mode_debug:
    null = open(os.devnull, 'w')
    old_stdout = sys.stdout
    sys.stdout = null
#### WE START BY USING SF=12 AND BW=125 AND CR=1, FOR ALL NODES AND ALL TRANSMISSIONS ####
if mode_debug:
RANDOM_SEED = 5
chan = 1
packetlen = 20
total_data = 60
beacon_time = 120
maxBSReceives = 16
multi_nodes = [10]
else:
RANDOM_SEED = int(sys.argv[1])
chan = int(sys.argv[2])
packetlen = int(sys.argv[3]) ##NODES SEND PACKETS OF JUST 20 Bytes
    total_data = int(sys.argv[4]) ## TOTAL DATA IN BUFFER, FOR EACH NODE (IT'S THE BUFFER OF DATA BEFORE SENDING STARTS)
    beacon_time = int(sys.argv[5]) ### SAT SENDS A BEACON EVERY beacon_time SECONDS
    maxBSReceives = int(sys.argv[6]) ## MAX NUMBER OF PACKETS THAT BS (ie SATELLITE) CAN RECEIVE AT THE SAME TIME
    multi_nodes = [int(sys.argv[7]), int(sys.argv[8]), int(sys.argv[9]), int(sys.argv[10]), int(sys.argv[11]), int(sys.argv[12]), int(sys.argv[13]), int(sys.argv[14]), int(sys.argv[15]), int(sys.argv[16]), int(sys.argv[17]), int(sys.argv[18]), int(sys.argv[19]), int(sys.argv[20])]
random.seed(RANDOM_SEED) # A FIXED RANDOM SEED ALWAYS GENERATES THE SAME RANDOM NUMBERS (ie SAME SIMULATION RESULTS)
nodesToSend = []
packetsToSend = math.ceil(total_data/packetlen)
###GLOBAL PARAMS ####
bsId = 1 ##ID OF BASE STATION (NOT USED)
channel = [0,1,2] ##NOT USED BY NOW
avgSendTime = 3 ## NOT USED! --> A NODE SENDS A PACKET EVERY X SECS
back_off = beacon_time * 0.95 ###BACK OFF TIME FOR SEND A PACKET
packetsAtBS = [] ## USED TO CHECK IF THERE ARE ALREADY PACKETS ON THE SATELLITE
c = 299792.458 ### SPEED OF LIGHT [km/s]
Ptx = 14
G_device = 0 ## ANTENNA GAIN FOR AN END-DEVICE
G_sat = 12 ## ANTENNA GAIN FOR SATELLITE
nodes = [] ###EACH NODE WILL BE APPENDED TO THIS VARIABLE
freq =868e6 ##USED FOR PATH LOSS CALCULATION
frequency = [868100000, 868300000, 868500000] ##FROM LORAWAN REGIONAL PARAMETERS EU863-870 / EU868
nrLost = 0 ### TOTAL OF LOST PACKETS DUE Lpl
nrCollisions = 0 ##TOTAL OF COLLIDED PACKETS
nrProcessed = 0 ##TOTAL OF PROCESSED PACKETS
nrReceived = 0 ###TOTAL OF RECEIVED PACKETS
## ARRAY WITH MEASURED SENSITIVITY VALUES
## THE ELEMENTS OF EACH ROW ARE:
# - FIRST: THE SF (NOT USABLE)
# - SECOND: SENSITIVITY FOR 125KHZ BW
# - THIRD: SENSITIVITY FOR 250KHZ BW
# - FOURTH: SENSITIVITY FOR 500KHZ BW
# NOTICE THAT SENSITIVITY DECREASES AS BW INCREASES, AND ALSO WITH LOWER SF
# THESE VALUES FOLLOW:
# wf = -174 + 10*log10(BW) + NF + SNRf
sf7 = np.array([7,-123,-120,-117.0])
sf8 = np.array([8,-126,-123,-120.0])
sf9 = np.array([9,-129,-126,-123.0])
sf10 = np.array([10,-132,-129,-126.0])
sf11 = np.array([11,-134.53,-131.52,-128.51])
sf12 = np.array([12,-137,-134,-131.0])
sensi = np.array([sf7,sf8,sf9,sf10,sf11,sf12])
path = "./wider_scenario_2/"
### -137dBm IS THE MINIMUM TOLERABLE SENSITIVITY, FOR SF=12 AND BW=125KHz ###
leo_pos=np.loadtxt( path + "LEO-XYZ-Pos.csv",skiprows=1,delimiter=',',usecols=(1,2,3))
## WHERE:
## leo_pos[i,j]:
## i --> the step time in sat pass
## j --> 0 for x-position, 1 for y-position, 2 for z-position
sites_pos = np.loadtxt( path + "SITES-XYZ-Pos.csv",skiprows=1,delimiter=',',usecols=(1,2,3))
## WHERE:
## sites_pos[i,j]:
## i --> the node i
## j --> 0 for x-position, 1 for y-position, 2 for z-position
dist_sat = np.zeros((sites_pos.shape[0],3,leo_pos.shape[0]))
t = 0
for i in range(leo_pos.shape[0]):
t+=1
dist_sat [:,:,i] = leo_pos[i,:] - sites_pos
## WHERE:
## dist_sat[i,j,k]:
## i --> the node i
## j --> 0 for x-position, 1 for y-position, 2 for z-position
## k --> the step time in sat pass
#### FOR COMPUTE DISTANCE MAGNITUDE (ABS) FROM END-DEVICE TO SAT PASSING BY ####
distance = np.zeros((sites_pos.shape[0],leo_pos.shape[0]))
distance[:,:] = (dist_sat[:,0,:]**2 + dist_sat[:,1,:]**2 + dist_sat[:,2,:]**2)**(1/2)
## WHERE:
## distance[i,j]:
## i --> the node i
## j --> the step time in sat pass
##MATRIX FOR LINK BUDGET Lpl ###
Lpl = np.zeros((sites_pos.shape[0],leo_pos.shape[0]))
Lpl = 20*np.log10(distance*1000) + 20*np.log10(freq) - 147.55 #DISTANCE MUST BE IN METERS
## WHERE:
## Lpl[i,j]:
## i --> the node i
## j --> the step time in sat pass
##MATRIX FOR LINK BUDGET, USING Prx ###
Prx = np.zeros((sites_pos.shape[0],leo_pos.shape[0]))
Prx = Ptx + G_sat + G_device -20*np.log10(distance*1000) - 20*np.log10(freq) + 147.55 #DISTANCE IS CONVERTED TO METERS
## WHERE:
## Prx[i,j]:
## i --> the node i
## j --> the step time in sat pass
distance = np.tile(distance, (20, 1)) # repeat the single pass 20 times along the time axis
Lpl = np.tile(Lpl, (20, 1))
Prx = np.tile(Prx, (20, 1))
elev = np.degrees(np.arcsin(599/distance))
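## Worked link-budget example (illustrative, values rounded):
##   FSPL(600 km slant range, 868 MHz) = 20*log10(6e5) + 20*log10(868e6) - 147.55 ≈ 146.8 dB
##   Prx = 14 dBm (Ptx) + 12 dBi (G_sat) + 0 dBi (G_device) - 146.8 ≈ -120.8 dBm
## which clears the -137 dBm SF12/125 kHz sensitivity floor by about 16 dB.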
IS7 = np.array([1,-8,-9,-9,-9,-9])
IS8 = np.array([-11,1,-11,-12,-13,-13])
IS9 = np.array([-15,-13,1,-13,-14,-15])
IS10 = np.array([-19,-18,-17,1,-17,-18])
IS11 = np.array([-22,-22,-21,-20,1,-20])
IS12 = np.array([-25,-25,-25,-24,-23,1])
import warnings
import numpy as np
from sklearn.utils import check_array
import matplotlib.pyplot as plt
from netanalytics.random_models import ER
def clustering_coefficient(X):
degrees = np.sum(X, axis=1)
D = np.zeros(X.shape[0])
for node in range(X.shape[0]):
neighbors = np.where(X[node,:]!=0)[0]
subset = X[neighbors, :]
subset = subset[:, neighbors]
D[node] = np.sum(subset)/2
C_v = 0
for i, d in enumerate(degrees):
if d <= 1:
continue
#print(D[i])
#print(degrees[i])
C_v += 2*D[i]/(degrees[i]*(degrees[i] -1))
    degree_greater = degrees.copy()
    degree_greater[np.where(degree_greater <= 1)] = 0
    #print(np.sum(degree_greater != 0))
    C_v /= np.sum(degree_greater != 0)
return C_v
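def _demo_clustering_coefficient():
    # Sanity sketch (not in the original file): in a triangle (K3) every
    # node's neighbourhood is fully connected, so the average local
    # clustering coefficient returned above must be exactly 1.
    K3 = np.array([[0, 1, 1],
                   [1, 0, 1],
                   [1, 1, 0]])
    assert np.isclose(clustering_coefficient(K3), 1.0)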
def thresholding(X, mode='5', min_v=0.01, max_v=0.09, make_plot=False,
ax=None, label=''):
"""
Params
------
X: numpy.array, shape=(n,n)
mode: string, optional
The way of thresholding such matrix
- '1' the 1% of the element of each row is taken
- '5' the 5% of the element of each row is taken
- 'global' the 75% of the elements of all the matrix are taken according
to their decreasing order
- 'cl_coeff' the threshold is selected comparing the
clustering coefficient with the one of a random graph
"LEAL, <NAME>; LOPEZ, Camilo; LOPEZ-KLEINE, Liliana.
Construction and comparison of gene co-expression networks shows
complex plant immune responses. PeerJ, 2014, 2: e610."
"""
X = check_array(X)
n, s = X.shape
X_new = X.copy()
mode = str(mode).lower()
if mode == '1' or mode == '5':
how_many = int(round(int(mode)*n/100))
indices = np.argsort(X, axis=1)
to_discard = indices[:, 0:n-how_many]
for r in range(X.shape[0]):
X_new[r, to_discard[r]] = 0
return X_new
if mode == 'global':
indices = np.unravel_index(np.argsort(X, axis=None), X.shape)
how_many = int(round(75/100*X.size))
indices =(indices[0][0:-how_many], indices[1][0:-how_many])
X_new[indices] = 0
return X_new
if mode=='cl_coeff':
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            if np.max(X) > 1:
                X_new = X_new - np.min(X_new)
                X_new *= 1/np.max(X)
prev_diff = -5
diffs = []
value = -1
result = None
found = False
for v in np.arange(min_v, max_v, 0.01):
X_old = X_new.copy()
X_new[np.where(X_new<v)] = 0
X_thr = X_new.copy()
X_thr = (X_thr != 0).astype(np.int)
np.fill_diagonal(X_thr, 0)
            C_v = clustering_coefficient(X_thr)
            degrees = np.sum(X_thr, axis=1) # node degrees of the thresholded graph
            N = X_new.shape[0] #np.sum(degrees!=0)
            k_bar = np.sum(degrees)/N
            k_d = np.sum(degrees**2)/N
            C_r_v = (k_d - k_bar)**2/(k_bar**3 * N)
#print("Clustering coefficient %.4f, random clustering coefficient %.4f " % (C_v, C_r_v))
diff = C_v - C_r_v
diffs.append(diff)
if np.abs(diff) < prev_diff and not found:
value = v - 0.01
result = X_old
found = True
prev_diff = np.abs(diff)
if make_plot:
if ax is None:
                fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(np.arange(0, len(diffs)), diffs, marker='o',
label=label)
ax.set_xlabel(r'$\tau_v$')
ax.set_ylabel(r' $|C(\tau_v) - C_r(\tau_v)|$ ')
#plt.xlim(0.01, 0.99)
#plt.xticks(np.arange(0, len(diffs)), (np.arange(0.01, 0.99, 0.01))
#print("Thresholding value %.2f"%value)
return result
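def _demo_thresholding():
    # Hedged usage sketch: with mode='5' only the largest 5% of entries in
    # each row survive, so a 100x100 matrix keeps at most 5 values per row.
    rng = np.random.RandomState(0)
    X = rng.rand(100, 100)
    X_thr = thresholding(X, mode='5')
    assert (X_thr != 0).sum(axis=1).max() <= 5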
def thresholding_generating_graphs(X, min_v=0.01, max_v=0.99, make_plot=False,
ax=None, label='', n_repetitions=10):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
X_new = X - np.min(X)
X_new *= 1/np.max(X)
mean_diffs = []
std_diffs = []
for v in np.arange(min_v, max_v, 0.01):
print("Threshold ", v)
X_old = X_new.copy()
X_new[np.where(X_new<v)] = 0
X_thr = X_new.copy()
X_thr = (X_thr != 0).astype(np.int)
            np.fill_diagonal(X_thr, 0)
"""
Created: 25/12/2017 00:40:39
Author: <NAME>
Email: <EMAIL>
This source code is partially adopted from https://github.com/bvilhjal/mixmogam/blob/master/linear_models.py
And the R source code of emma and the intel version c code of emmax is referred
The GEMMA paper is referred for the EM and NR algorithm
And the SVS gives a lot of details could not be found any where else
http://doc.goldenhelix.com/SVS/latest/svsmanual/mixedModelMethods/overview.html#overviewofmixedlinearmodels
"""
import sys
this_path = "/home/who/Dropbox/PycharmProjects/myEmmax"
sys.path = [this_path] + sys.path
import numpy as np
import scipy as sp
from numpy import linalg
from scipy import stats
from scipy import optimize
import warnings
import kinship
import re
import inspect, os
import pickle
from dataParsers import parse_plink_tped_file
from dataParsers import parse_plink_fam_phenotype_file
def qr_decomp(X):
"""
QR decomposition. Adaptations to changes between scipy versions.
"""
ver_list = tuple(map(int, (sp.__version__).split('.')[:2]))
if ver_list >= (0, 9):
return linalg.qr(X, mode='reduced') # Do the QR-decomposition for the Gram-Schmidt process.
else:
return linalg.qr(X, econ=True) # Do the QR-decomposition for the Gram-Schmidt process.
class LinearMixedModel:
"""
A class for linear mixed models
"""
def __init__(self, Y=None, k=None, dtype='double'):
"""
The fixed effects should be a list of fixed effect lists (SNPs)
"""
self.n = len(Y)
self.y_var = np.var(Y, ddof=1, dtype=dtype)
self.Y = np.matrix(Y, dtype=dtype)
self.Y.shape = (self.n, 1)
self.X = np.matrix(np.ones((self.n, 1), dtype=dtype)) # The intercept
self.p = 1
self.beta_est = None
self.K = k
# A list of random effect type, and the cov matrix.
self.random_effects = [('normal', np.matrix(np.identity(self.n)))] # The first random effect is the IID error.
def add_factor(self, x, lin_depend_thres=1e-4):
"""
Adds an explanatory variable to the X matrix.
"""
# Checking whether this new cofactor in linearly independent.
new_x = np.array(x)
new_x.shape = len(x)
(beta, rss, rank, sigma) = linalg.lstsq(self.X, new_x)
if float(rss) < lin_depend_thres:
warnings.warn('A factor was found to be linearly dependent on the factors already in the X matrix. '
'Hence skipping it!')
return False
new_x.shape = (self.n, 1)
self.X = np.hstack([self.X, new_x])
self.p += 1
return True
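    # Hedged usage sketch (the data names below are made up):
    #   lmm = LinearMixedModel(Y=phenotype_values, k=kinship_matrix)
    #   lmm.add_factor(covariate_vector)  # e.g. a population-structure covariate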
def _get_eigen_L_(self, K=None, dtype='double'): # in the source code of emma, there is a matrix Z,
# I think we should never use Z here
if K is None:
K = self.K
evals, evecs = linalg.eigh(K)
evals = np.array(evals, dtype=dtype)
return {'values': evals, 'vectors': np.mat(evecs, dtype=dtype).T}
def _get_eigen_R_(self, X=None, K=None, hat_matrix=None, dtype='double'):
if X is None:
X = self.X
q = X.shape[1]
if not hat_matrix:
X_squared_inverse = linalg.pinv(X.T * X) # (X.T*X)^{-1}
# linalg.pinv: Calculate the generalized inverse of a matrix using
# its singular-value decomposition (SVD) and including all large singular values.
hat_matrix = X * X_squared_inverse * X.T
if K is None:
K = self.K
        S = np.mat(np.identity(self.n)) - hat_matrix
from __future__ import absolute_import, division
import sys, os
from typing import Dict
import torch
import numpy
import matplotlib
from matplotlib.ticker import ScalarFormatter, AutoMinorLocator
import matplotlib.pyplot as plt
font = {'family': 'monospace',
'size' : 10,
'weight': 'bold'}
plt.rc('axes', linewidth=1.5)
plt.rc('font', **font)
AXIS_FONT_SIZE = 25
class model_meter():
def __init__(self, args: Dict={'target': None,
'pred' : None,
'pred_prob': None,
'label_names': None,
}):
self.target = args['target']
self.pred = args['pred']
self.pred_prob = args['pred_prob']
self.lNames = args['label_names']
self.labelNums = numpy.zeros((1,self.pred_prob.shape[1]))
# label conditional measures
self.confusion = numpy.zeros((self.pred_prob.shape[1], self.pred_prob.shape[1]))
self.sensitivity = numpy.zeros((1,self.pred_prob.shape[1])) # recall, True postive rate
        self.specificity = numpy.zeros((1,self.pred_prob.shape[1])) # True negative rate
self.precision = numpy.zeros((1,self.pred_prob.shape[1]))
# aggregated performance measures
self.accuracy = None
self.balanced_accuracy = None
    def conditional_measure(self):
# NOTE: The confusion matrix has rows as True values and columns as predictions.
# FIXME: Include support for different cutoff
for k in range(self.pred_prob.shape[1]):
pred_val = (self.pred == k).int()
for j in range(self.pred_prob.shape[1]):
true_val = (self.target == j).int()
match = (pred_val & true_val).int()
self.confusion[j,k] = torch.sum(match).item()
self.labelNums[0,k] = torch.sum((self.target == k).int(), 0).item()
self.sensitivity[0,k] = float(self.confusion[k,k]) / float(self.labelNums[0,k])
if float(torch.sum(pred_val).item()) != 0:
self.precision[0,k] = float(self.confusion[k,k]) / float(torch.sum(pred_val).item())
        temp = numpy.sum(numpy.diag(self.confusion))
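        # Hedged completion sketch (the source is truncated here): the
        # confusion-matrix diagonal counts correct predictions, so overall
        # accuracy is that sum over the total sample count, and balanced
        # accuracy is the mean per-class sensitivity.
        self.accuracy = temp / float(numpy.sum(self.labelNums))
        self.balanced_accuracy = float(numpy.mean(self.sensitivity))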
from keras.models import load_model # Will be used to retrieve the saved weights of the trained NN.
import sklearn.model_selection as sk
import scipy.io as sio
import numpy as np
encoder_size = 4000
encoder = load_model(r'./weights/encoder_weights.h5') # Load the TRAINED encoder.
#decoder = load_model(r'./weights/decoder_weights.h5') # Load the TRAINED decoder.
#loading data
x_train = sio.loadmat('X_Train.mat')
x_train = x_train['X_Train']
x_test = sio.loadmat('X_Test.mat')
x_test = x_test['X_Test']
x_val = sio.loadmat('X_Validation.mat')
x_val = x_val['X_Validation']
#Training features extraction
train_len = x_train.shape
x_train_features = np.zeros((train_len[0], encoder_size))
for i in range(train_len[0]):
f1 = x_train[i][:]
f1 = f1.reshape(1,15000)
    inputs = np.array(f1)
import gym
import torch
import numpy as np, reward as rw, torch.nn as nn, matplotlib.pyplot as plt
import torch.nn.functional as F, reward.utils as U
screen_width = 600
device = U.device.get()
MAX_STEPS = 2e5
def get_cart_location():
world_width = env.x_threshold * 2
scale = screen_width / world_width
return int(env.state[0] * scale + screen_width / 2.0) # MIDDLE OF CART
def get_screen():
screen = env.render(mode='rgb_array').transpose((2, 0, 1))
# Strip off the top and bottom of the screen
screen = screen[:, 160:320]
view_width = 320
cart_location = get_cart_location()
if cart_location < view_width // 2: slice_range = slice(view_width)
elif cart_location > (screen_width - view_width // 2): slice_range = slice(-view_width, None)
else: slice_range = slice(cart_location - view_width // 2, cart_location + view_width // 2)
# Strip off the edges, so that we have a square image centered on a cart
return screen[:, :, slice_range]
class QValueNN(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(4, 16, kernel_size=5, stride=2)
self.bn1 = nn.BatchNorm2d(16)
self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
self.bn3 = nn.BatchNorm2d(32)
self.head = nn.Linear(448, 2)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
return self.head(x.view(x.size(0), -1))
class Policy:
def __init__(self, qnn, exp_rate):
self.qnn, self._exp_rate = qnn, U.make_callable(exp_rate)
@property
def exp_rate(self): return self._exp_rate(U.global_step.get())
def get_act(self, s):
q = self.qnn(s)
if np.random.random() < self.exp_rate: return U.to_tensor(np.random.choice(len(q.squeeze())), dtype='long', device='cpu')[None]
else: return q.argmax()[None]
env = gym.make('CartPole-v0').unwrapped
def main():
S = rw.space.Image(sz=[40, 80], order='NCHW')
A = rw.space.Categorical(n_acs=env.action_space.n)
exp_rate = U.schedules.linear_schedule(1., .1, int(.3 * MAX_STEPS))
tfms = [rw.tfm.img.Gray(), rw.tfm.img.Resize(sz=[40, 80]), rw.tfm.img.Stack(n=4)]
qnn = QValueNN().to(device)
qnn_targ = QValueNN().to(device).eval()
q_opt = torch.optim.Adam(qnn.parameters())
policy = Policy(qnn=qnn, exp_rate=exp_rate)
logger = U.Logger('/tmp/logs/cp_img/v3-1', maxsteps=MAX_STEPS, logfreq=300)
model = rw.model.DQN(policy=policy, qnn=qnn, qnn_targ=qnn_targ, q_opt=q_opt, targ_up_freq=10, logger=logger, gamma=0.99)
agent = rw.agent.Replay(model=model, logger=logger, s_sp=S, a_sp=A, bs=128, maxlen=1e4)
state = env.reset()
last_screen = get_screen()
new_screen = get_screen()
tsteps = 0
for _ in range(int(MAX_STEPS)):
s = S((new_screen - last_screen)[None])
s = s.apply_tfms(tfms)
a = agent.get_act(s)
_, r, d, _ = env.step(int(a[0].val))
new_screen, last_screen = get_screen(), new_screen
        agent.report(r=np.array(r)[None], d=np.array(d)[None])
import os
os.chdir('C:/Users/Martim_Pc/Desktop/DACO_fin')
from Unet import Unet
import numpy as np
import cv2.cv2 as cv2
from skimage.measure import label, regionprops
from skimage.transform import downscale_local_mean,hough_ellipse
from skimage.filters.rank import entropy, mean
from skimage.filters import gabor, gabor_kernel
from skimage.morphology import disk, skeletonize, thin
from skimage.feature import local_binary_pattern, greycomatrix, greycoprops
from skimage.draw import ellipse_perimeter
from skimage import data, color, img_as_ubyte
from matplotlib import pyplot as plt
import pickle
import math
import time
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import distance_transform_edt
def myShowImage(img,name = "from_show_function"):
cv2.imshow(name, img)
cv2.waitKey(0) # waits until a key is pressed
cv2.destroyAllWindows() # destroys the window showing image
return
def removeBackground(img):
maskRed = img[...,0]>30
maskGreen = img[...,1]>30
maskBlue = img[...,2]>30
mask1 = np.logical_or(maskRed,maskGreen)
maskFinal = np.logical_or(mask1,maskBlue)
zeros = np.zeros(img.shape)
zeros[maskFinal] = img[maskFinal]
zeros = zeros.astype(np.uint8)
img = np.copy(zeros)
return img
testImages = ['20051020_44982_0100_PP.tif',
'20051019_38557_0100_PP.tif',
'20051213_62383_0100_PP.tif',
'IDRiD_14.jpg',
'OD0375EY.JPG']
def getVesselsUtils(imgPath):
img = cv2.imread(imgPath)#,cv2.CV_8UC1)
img = removeBackground(img)
scale_percent = 25 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA) # BGR - blue: 0; green: 1; red: 2
resized = resized.astype(np.uint8)
clahe = cv2.createCLAHE(clipLimit=1, tileGridSize=(8, 8))
img_out1 = clahe.apply(resized[...,1])
kernel_ones = np.ones([25,25],np.uint8)
bh_1_cv = cv2.morphologyEx(img_out1, cv2.MORPH_BLACKHAT,kernel_ones)
int_vs = clahe.apply(bh_1_cv)
_,thresh2 = cv2.threshold(int_vs,60,255,cv2.THRESH_BINARY) # thresh2 is vessels segmentation used in OD segmentation
labels, _ = label(thresh2, neighbors=8, background = 0, return_num = True)
regions = regionprops(labels)
for region in regions:
value = labels[region['coords'][0][0],region['coords'][0][1]]
#circularity = (4*math.pi*region['area']) / (region['perimeter']**2)
bboxAreRel = region['area']/region['bbox_area']
if region['area'] < 10 or (bboxAreRel > 0.35): #circularity > 0.3 and
removeIndexs = np.where(labels==value)
labels[removeIndexs[0],removeIndexs[1]] = 0
labels[labels > 0] = 1
labelsImg = np.multiply(np.array(labels, np.uint8),255) # labelsImg = segmented relevant vessels
myShowImage(labelsImg)
# get skeleton of image
doublT_small = downscale_local_mean(labels,(2,2))
myShowImage(doublT_small)
skeleton = skeletonize(doublT_small)
skel = skeleton * 1
skel = skel.astype(np.uint8)
skel = np.multiply(skel,255)
myShowImage(skel)
threshAcc = 40
for k in range(1,6):
try:
#fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 4))
#doublT_small = color.gray2rgb(img_as_ubyte(doublT_small))
tic = time.time()
result = hough_ellipse(skeleton, accuracy=20, threshold=threshAcc, min_size=50, max_size=None) # thresh = 30
result.sort(order='accumulator')
aSizeResult_Arr = np.array(result['a'])
bSizeResult_Arr = np.array(result['b'])
aIndex = np.where(aSizeResult_Arr > 0)
bIndex = np.where(bSizeResult_Arr > 0)
relevantIndexs = np.intersect1d(aIndex,bIndex)
axisRelation = np.divide(aSizeResult_Arr[relevantIndexs],bSizeResult_Arr[relevantIndexs])
goodRelationIndexs = np.where(axisRelation<1.5)
ellipsLargest = np.max(relevantIndexs[goodRelationIndexs])
toc = time.time()
ellapsedTime = toc-tic
print(ellapsedTime)
best = list(result[ellipsLargest])
yc, xc, a, b = [int(round(x)) for x in best[1:5]]
orientation = best[5]
#print(best[0])
# Draw the ellipse on the original image
cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)
# Draw the edge (white) and the resulting ellipse (red)
outsideX = np.where(cx>doublT_small.shape[1])
preX = np.where(cx<0)
outsideY = np.where(cy>doublT_small.shape[0])
preY = np.where(cy<0)
cx[outsideX] = doublT_small.shape[1]
cy[outsideY] = doublT_small.shape[0]
cx[preX] = 0
cy[preY] = 0
break
except:
threshAcc = threshAcc - 10
if k == 5:
threshAcc = threshAcc + 5
ellipseMask = np.zeros(doublT_small.shape)
ellipseMask[abs(cy-1),abs(cx-1)] = 1
elipsResized = cv2.resize(ellipseMask, dsize=dim, interpolation=cv2.INTER_CUBIC)
#elipsResized = np.average(elipsResized,axis = 2) # 3 channels -> 1 channel
elipsResized[elipsResized>0.5] = 1
elipsResized[elipsResized<1] = 0
elipsResized = thin(elipsResized)
elipsResized = elipsResized*1
elipsImage = (elipsResized*255).astype(np.uint8)
myShowImage(elipsImage)
entr_img = entropy(labelsImg, disk(5))
myShowImage(entr_img)
vessels = np.copy(thresh2)
ellipse = np.copy(elipsResized)
entropyVessels = np.copy(entr_img)
return vessels, entropyVessels, ellipse
def getDistanceArray(height,width):
indices_Arr = np.indices((height,width)).transpose((1,2,0))
centreCoords = np.array([height/2,width/2])
distance_Arr = np.sqrt(np.add(np.power(indices_Arr[...,0]-centreCoords[0],2),np.power(indices_Arr[...,1]-centreCoords[1],2)))
normDistance_Arr = distance_Arr / np.max(distance_Arr)
normDistanceColumn_Arr = np.squeeze(normDistance_Arr.reshape([1,normDistance_Arr.shape[0]*normDistance_Arr.shape[1]])).T
return normDistanceColumn_Arr
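# Shape note (illustrative): getDistanceArray(h, w) returns a flattened column
# of h*w per-pixel distances to the image centre, normalised so the farthest
# pixel maps to 1.0, e.g. getDistanceArray(4, 4).shape == (16,).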
def reshapeFeature(img,featureSize,normalize=True):
feature = np.squeeze(img.reshape([1,featureSize])).T
if normalize:
feature = (feature-np.average(feature)) / np.std(feature)
return feature
def newBBcoords(img_pred_Log,test_image):
# returns coordinates of the bounding box for the region with the largest area
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
labelsImg = np.multiply(np.array(labelsLog, np.uint8),255)
#myShowImage(labelsImg)
sizeBoxX = regionsLog[maxIndex]['bbox'][3]-regionsLog[maxIndex]['bbox'][1]
sizeBoxY = regionsLog[maxIndex]['bbox'][2]-regionsLog[maxIndex]['bbox'][0]
coordsBbox = list(regionsLog[maxIndex]['bbox'])
if sizeBoxX <= 0.4 * img_pred_Log.shape[1]:
newSizeBoxX = 0.3 / (sizeBoxX / img_pred_Log.shape[1])
coordsBbox[1] = coordsBbox[1] - sizeBoxX*(0.5*(newSizeBoxX-1))
coordsBbox[3] = coordsBbox[3] + sizeBoxX*(0.5*(newSizeBoxX-1))
if sizeBoxY <= 0.4 * img_pred_Log.shape[0]:
newSizeBoxY = 0.5 / (sizeBoxY / img_pred_Log.shape[0])
coordsBbox[0] = coordsBbox[0] - sizeBoxY*(0.5*(newSizeBoxY-1))
coordsBbox[2] = coordsBbox[2] + sizeBoxY*(0.5*(newSizeBoxY-1))
if coordsBbox[0] < 0:
coordsBbox[0] = 0
if coordsBbox[1] < 0:
coordsBbox[1] = 0
if coordsBbox[2] > test_image.shape[0]:
coordsBbox[2] = test_image.shape[0] - 1
if coordsBbox[3] > test_image.shape[1]:
coordsBbox[3] = test_image.shape[1] - 1
coordsBboxInt = [round(x) for x in coordsBbox]
return coordsBboxInt
def getLargestAreaEcentroid(img_pred_Log):
# returns mask with the regions with the largest area, coords of centroid and radius
kernel_ones = np.ones([3,3],np.uint8)
closing_Log = cv2.morphologyEx(img_pred_Log, cv2.MORPH_CLOSE, kernel_ones)
labelsLog, numLog = label(closing_Log, neighbors=8, background = 0, return_num = True)
regionsLog = regionprops(labelsLog)
areasLog = [region['area'] for region in regionsLog]
areasLogArr = np.array(areasLog)
maxIndex = np.argmax(areasLogArr)
value = labelsLog[regionsLog[maxIndex]['coords'][0][0],regionsLog[maxIndex]['coords'][0][1]]
labelsLog[labelsLog != value] = 0
labelsLog[labelsLog == value] = 1
centreCoords = np.round(regionsLog[maxIndex]['centroid'])
centreCoords = centreCoords.astype(np.uint)
radius = (regionsLog[maxIndex]['major_axis_length'] + regionsLog[maxIndex]['minor_axis_length']) / 4
colsCoord = [regionsLog[maxIndex]['bbox'][1],regionsLog[maxIndex]['bbox'][3]]
labelsArr = np.array(labelsLog)
return labelsArr, centreCoords, radius, colsCoord
def gaborFilter(img_in):
filtered_ims = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (6, 7):
for frequency in (0.06, 0.07):
filt_real, _ = gabor(img_in, frequency, theta=theta,sigma_x=sigma, sigma_y=sigma) # _ = imaginary part
filtered_ims.append(filt_real)
filtered_ims_arr = np.array(filtered_ims)
return filtered_ims_arr
def getFeature2(greenChannel):
filtered_ims_arr = gaborFilter(greenChannel)
mean = filtered_ims_arr[0]
for k in range(1,16):
mean = np.add(mean,filtered_ims_arr[k])
mean = np.divide(mean,16)
mean = mean.astype(np.uint8)
clahe = cv2.createCLAHE(clipLimit=4, tileGridSize=(8, 8))
imgContrasted = clahe.apply(mean)
maxVal = np.max(imgContrasted)
feature2 = np.multiply(np.divide(imgContrasted,maxVal),255)
feature2 = feature2.astype(np.uint8)
return feature2
def getFeature3(elipsResized,Od_res,resized):
Od_uint8 = np.copy(Od_res.astype(np.uint8))
Od_uint8 = np.divide(Od_uint8,np.max(Od_uint8)).astype(np.uint8)
testDistance = distance_transform_edt(1 - Od_uint8)
testDistance = np.multiply(testDistance,255/np.max(testDistance))
testDistance = testDistance.astype(np.uint8)
distanceToOd = 255-testDistance
distanceToOd[distanceToOd >220] = 255
distanceToOd[distanceToOd <=220] = 0
vesselsRelevantLine = np.logical_and(distanceToOd,elipsResized)
vesselsRelevantLine = vesselsRelevantLine*1
distanceToVesselLine = distance_transform_edt(1 - vesselsRelevantLine)
distanceToVesselLine = np.multiply(distanceToVesselLine,255/np.max(distanceToVesselLine))
distanceToVesselLine = distanceToVesselLine.astype(np.uint8)
distanceToVesselLine = 255-distanceToVesselLine
distanceToVesselLine[distanceToVesselLine >220] = 255
distanceToVesselLine[distanceToVesselLine <=220] = 0
distanceToVesselLine = np.logical_or(distanceToVesselLine,Od_uint8)
greenChannel = np.copy(resized[...,1])
vesselLine_indxs=np.where(distanceToVesselLine!=0)
#greenChannel[vesselLine_indxs] = 0
clahe = cv2.createCLAHE(clipLimit=4, tileGridSize=(8, 8))
greenContrasted = clahe.apply(greenChannel)
greenContrasted = np.multiply(greenContrasted,255/np.max(greenContrasted))
greenContrasted = greenContrasted.astype(np.uint8)
greenContrasted[vesselLine_indxs] = 0
return greenContrasted
def getFeature4(resized):
radius = 1
n_points = 8 * radius
GC = np.copy(resized[...,1])
lbp = local_binary_pattern(GC, n_points, radius,method="ror")
step = int(-1 * np.max(lbp)/10)
feature4 = np.copy(lbp)
th_feat4 = int(np.max(lbp)+(step*7))
feature4[feature4 < th_feat4] = 0
feature4[feature4 >= th_feat4] = 255
feature4 = distance_transform_edt(255-feature4)
feature4 = np.multiply(feature4,255/np.max(feature4))
feature4 = feature4.astype(np.uint8)
return feature4
def getFeatures567(img,height,width,scale_percent):
scale_percent = int(100/scale_percent)
feature5 = np.zeros([height,width])
feature6 = np.zeros([height,width])
feature7 = np.zeros([height,width])
for m in range(width):
for t in range(height):
patch = np.copy(img[t*scale_percent:(t+1)*scale_percent,m*scale_percent:(m+1)*scale_percent,1])
glcm = greycomatrix(patch, [5], [0], 256, symmetric=True, normed=True)
feature5[t,m]=greycoprops(glcm, 'dissimilarity')[0, 0] # good
feature6[t,m]=greycoprops(glcm, 'contrast')[0, 0] # th-55 good
feature7[t,m]=greycoprops(glcm, 'homogeneity')[0, 0] # ok
feature5 = np.divide(feature5,np.max(feature5))
feature5 = np.multiply(feature5,255)
feature5 = feature5.astype(np.uint8)
feature6[feature6<55]=0
feature6[feature6>=55]=255
feature6 = feature6.astype(np.uint8)
feature7 = np.divide(feature7,np.max(feature7))
feature7 = np.multiply(feature7,255)
feature7 = 255-feature7
feature7 = feature7.astype(np.uint8)
return feature5, feature6, feature7
def calculateHue(img):
RC = img[...,2]/255 #
GC = img[...,1]/255
BC = img[...,0]/255
num = np.multiply(np.add(np.subtract(RC,GC),np.subtract(RC,BC)),0.5)
denom = np.power(np.add(np.power(np.subtract(RC,GC),2),np.multiply(np.subtract(RC,BC),np.subtract(GC,BC))),0.5)
angle = np.divide(num,np.add(denom,0.000000000001))
H = np.multiply(np.arccos(angle),57.295779513)
H[BC>GC] = 360 - H[BC>GC]
H = np.divide(H,360)
H[H>0.3]=0
return H
def getFeature8(resized):
Hue = calculateHue(np.copy(resized))
clahe = cv2.createCLAHE(clipLimit=4, tileGridSize=(8, 8))
Hue = np.divide(Hue,np.max(Hue))
Hue = Hue * 255
Hue = Hue.astype(np.uint8)
contrastedHue = clahe.apply(Hue)
return contrastedHue
def getBoundingBlackBars(img):
firstCol = np.min(np.where(img!=0)[1])
lastCol = np.max(np.where(img!=0)[1])
finalWidth = int(lastCol-firstCol)
offsetRows = int(round((finalWidth-img.shape[0])/2))
return finalWidth, offsetRows, firstCol, lastCol
def removeBlackBarsRetina(img,finalWidth,offsetRows,firstCol,lastCol,RGB=True):
#Get a squared image
#Retina image
if RGB:
squareToBePatched = np.zeros([img.shape[0],finalWidth,3])
squareToBePatched[::,::] = np.copy(img[::,firstCol:lastCol])
else:
        squareToBePatched = np.zeros([img.shape[0],finalWidth])
"""
This module contains the functions needed to use the spherical harmonic techniques
"""
import numpy as np
import scipy.sparse
from scipy.special import lpmv, spherical_jn, spherical_yn
from numba import jit
#@jit(nopython=True)
def sub2indSH(m,n):
"""
i = sub2indSH(m,n)
Convert Spherical Harmonic (m,n) indices to array index i
Assumes that i iterates from 0 (Python style)
"""
i = n**2 + n + m
return i
#@jit(nopython=True)
def ind2subSH(i):
"""
(m,n) = ind2subSH(i)
Convert array index i to Spherical Harmonic (m,n) indices
Assumes that i iterates from 0 (Python style)
Assumes that arguments are NumPy arrays
"""
    n = np.ceil(np.sqrt(i+1)-1)
    m = i - n**2 - n
return (m,n)
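def _demo_sh_indexing():
    # Round-trip sketch (not in the original module): ind2subSH inverts
    # sub2indSH for every index of an order-3 expansion (16 coefficients).
    i = np.arange(16)
    m, n = ind2subSH(i)
    assert np.array_equal(sub2indSH(m, n), i)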
#@jit(nopython=True)
def cart2sph(x,y,z):
"""
    (r, alpha, sinbeta, cosbeta) = cart2sph(x,y,z)
Converts Cartesian coordinates (x,y,z) into Spherical Polar Coordinates
(r, alpha, beta), where alpha is azimuth angle (angle in radians from the
positive x axis, with rotation around the positive z axis according to
the right-hand screw rule) and beta is polar angle (angle in radians from
the positive z axis). beta can alternatively be returned as two arrays of
its cos and sin values.
It is assumed that x, y and z are all the same size.
The returned arrays will be the same size as the arguments.
"""
r = np.sqrt(x**2 + y**2 + z**2)
rho = np.sqrt(x**2 + y**2)
alpha = np.arctan2(y, x)
cosbeta = z / r
sinbeta = rho / r
return r, alpha, sinbeta, cosbeta
#@jit(nopython=True)
def reflect_sh(Bnm, xFlag, yFlag, zFlag):
"""
    Bnm = reflect_sh(Bnm, xFlag, yFlag, zFlag)
    Reflect a Spherical Harmonic representation of a sound field in 1 to 3 Cartesian axes.
    Arguments:
Bnm Vector of Spherical Harmonic weights. Must have (Order+1)^2 entries, where Order is an integer.
xFlag Boolean indicating whether to flip in the x-direction
yFlag Boolean indicating whether to flip in the y-direction
zFlag Boolean indicating whether to flip in the z-direction
"""
# Get lists of n and m values:
(m, n) = ind2subSH(np.arange(Bnm.size))
# Reflecting in Z:
if zFlag:
Bnm = Bnm * ((-1)**(n+m)).reshape((np.size(m),1))
# Reflecting in X:
if xFlag:
Bnm = Bnm * ((-1)**m).reshape((np.size(m),1))
# Reflecting in X or Y:
    if xFlag ^ yFlag: # XOR
#for n in range(int(np.ceil(np.sqrt(Bnm.size)))-1):
for n in np.arange(max(n)+1):
i = sub2indSH(np.arange(-n,n+1),n).astype(int)
Bnm[i,0] = np.flip(Bnm[i,0])
return Bnm
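def _demo_reflect_sh():
    # Involution sketch (assumption, not in the original module): reflecting
    # in the same axis twice is the identity on the coefficient vector, here
    # checked for z with an order-2 (9-coefficient) expansion.
    Bnm = np.arange(1, 10, dtype=complex).reshape(9, 1)
    twice = reflect_sh(reflect_sh(Bnm.copy(), False, False, True), False, False, True)
    assert np.allclose(twice, Bnm)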
#@jit(nopython=True)
def get_translation_matrix(t,k,OrderS,OrderR):
"""
    T = get_translation_matrix(t,k,OrderS,OrderR)
Computes a translation matrix T from the coefficients of a Spherical
Harmonic source (outgoing spherical Hankel radial functions) to the
coefficients at a Spherical Harmonic receiver (spherical Bessel radial
functions) location at position t relative to the source. It is assumed
that both spherical coordinate systems (source and receiver) are aligned
    to the same Cartesian system in which t is expressed. The polar angle is
    measured from the positive z axis.
Essentially computes equation 3.2.17 of:
<NAME>., & <NAME>. (2005). Fast Multipole Methods for the
Helmholtz Equation in Three Dimensions (1st ed.). Elsevier Science.
Arguments:
t Cartesian translation vector (1x3 real row vector)
k Wavenumber (positive real scalar or vector in radians/meter)
OrderS Order of the source (non-negative real integer scalar)
OrderR Order of the receiver (non-negative real integer scalar)
This file also contains the sub-functions
GetStructuralTranslationCoefficients and Wigner3jSymbol.
"""
OrderT = OrderS + OrderR
S = GetStructuralTranslationCoefficients(OrderS,OrderR)
# Express t in spherical coordinates:
[r,alpha,sinbeta,cosbeta] = cart2sph(t[0],t[1],t[2])
# Evaluate spherical harmonic functions:
Y, dy_dbeta, dy_dalpha = spherical_harmonic_all(OrderT, np.array([[alpha]]), np.array([[sinbeta]]), np.array([[cosbeta]]))
# Allocated results array:
T = np.zeros(((OrderR+1)**2, (OrderS+1)**2))
# Loop over translation order & compute summation:
for nT in np.arange(OrderT+1):
h, dhdz = spherical_hankel_out(nT,k*r) # Compute radial function:
for mT in np.arange(-nT, nT+1):
iT = sub2indSH(mT,nT)
T = T + h * Y[0][int(iT)] * S[int(iT),:,:] #!!!
return T
#@jit(nopython=True)
def GetStructuralTranslationCoefficients(OrderS,OrderR):
"""
S = GetStructuralTranslationCoefficients(OrderS,OrderR)
Computes the 'Structural Translation Coefficients' used in Spherical
Harmonic translation routines, as defined in section 3.2.1 of:
<NAME>., & <NAME>. (2005). Fast Multipole Methods for the
Helmholtz Equation in Three Dimensions (1st ed.). Elsevier Science.
Arguments:
OrderS Order of the source (non-negative real integer scalar)
OrderR Order of the receiver (non-negative real integer scalar)
Returned variable is a 3D array of size [(OrderR+1)**2, (OrderS+1)**2,
(OrderR+OrderS+1)**2].
"""
# Order required for translation:
OrderT = OrderS + OrderR
# Allocate cell array:
S = np.zeros(((OrderT+1)**2, (OrderR+1)**2, (OrderS+1)**2), dtype = np.complex64)
# Loop over translation order (n2 & m2):
for nT in np.arange(OrderT+1, dtype = np.float64): # n'' in book
for mT in np.arange(-nT, nT+1, dtype = np.float64): # m'' in book
iT = sub2indSH(mT,nT)
if mT < 0: # because m'' is negated
epT = (-1)**mT
else:
epT = 1.0
# Loop over source order (nS & mS):
for nS in np.arange(OrderS+1, dtype = np.float64): # n in book
for mS in np.arange(-nS, nS+1, dtype = np.float64): # m in book
if mS > 0:
epS = (-1)**mS
else:
epS = 1.0
# Loop over recevier order (nR & mR):
for nR in np.arange(OrderR+1, dtype = np.float64): # n' in book
for mR in np.arange(-nR, nR+1, dtype = np.float64): # m' in book
if mR < 0: # because m' is negated
epR = (-1)**mR
else:
epR = 1.0
# Compute coefficient if within non-zero range:
if nT >= abs(nR-nS) and nT <= (nR+nS):
S[int(iT), int(sub2indSH(mR,nR)), int(sub2indSH(mS,nS))] = (
1j**(nR+nT-nS) * epS * epR * epT
* np.sqrt(4*np.pi*(2*nS+1)*(2*nR+1)*(2*nT+1))
* Wigner3jSymbol(nS, nR, nT, mS, -mR, -mT)
* Wigner3jSymbol(nS, nR, nT, 0, 0, 0)
)
return S
#@jit(nopython=True)
def Wigner3jSymbol(j1, j2, j3, m1, m2, m3):
"""
W3jS = Wigner3j(j1, j2, j3, m1, m2, m3)
Computes the Wigner 3j symbol following the formulation given at
http://mathworld.wolfram.com/Wigner3j-Symbol.html.
Arguments:
j1, j2, j3, m1, m2 and m3 All must be scalar half-integers
Check arguments against 'selection rules' (cited to Messiah 1962, pp. 1054-1056; Shore and Menzel 1968, p. 272)
Nullifying any of these means the symbol equals zero.
"""
if abs(m1)<=abs(j1) and abs(m2)<=abs(j2) and abs(m3)<=abs(j3) and m1+m2+m3==0 and abs(j1-j2)<=j3 and j3<=(j1+j2) and np.remainder(j1+j2+j3,1)==0:
# Evaluate the symbol using the Racah formula (Equation 7):
# Evalaute summation:
W3jS = 0
for t in np.arange(min([j1+j2-j3, j1-m1, j2+m2])+1):
if (j3-j2+t+m1>=0) and (j3-j1+t-m2>=0) and (j1+j2-j3-t>=0) and (j1-t-m1>=0) and (j2-t+m2>=0):
# Only include term in summation if all factorials have non-negative arguments
x = (np.math.factorial(t)
* np.math.factorial(j3-j2+t+m1)
* np.math.factorial(j3-j1+t-m2)
* np.math.factorial(j1+j2-j3-t)
* np.math.factorial(j1-t-m1)
* np.math.factorial(j2-t+m2)
)
W3jS = W3jS + (-1)**t/x
# Coefficients outside the summation:
W3jS = (W3jS
* (-1)**(j1-j2-m3)
* np.sqrt(float(np.math.factorial(j1+m1)*np.math.factorial(j1-m1)*np.math.factorial(j2+m2)*np.math.factorial(j2-m2)* np.math.factorial(j3+m3)*np.math.factorial(j3-m3)))
* np.sqrt(float(np.math.factorial(j1 + j2 - j3)*np.math.factorial(j1 - j2 + j3)*np.math.factorial(-j1 + j2 + j3) / np.math.factorial(j1 + j2 + j3 + 1)))
)
else:
W3jS = 0 # One of the 'Selection Rules' was nullified.
return W3jS
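def _demo_wigner3j():
    # Known-value sketch (standard tabulated result, not from this file):
    # the symbol (1 1 0; 0 0 0) equals -1/sqrt(3).
    assert abs(Wigner3jSymbol(1, 1, 0, 0, 0, 0) + 1/np.sqrt(3)) < 1e-12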
#@jit(nopython=True)
def get_rotation_matrix(a,b,c,Order):
"""
    R = get_rotation_matrix(a,b,c,Order)
Computes a rotation matrix R between the coefficients of Spherical
Harmonic sound field descriptions before and after rotation. Note that R
is block diagonal, since rotations only involve coefficients within an
order, so it's returned as a sparse matrix. R is square & size (Order+1)^2.
Essentially this is equations 3.3.37 and 3.3.39 of:
<NAME>., & <NAME>. (2005). Fast Multipole Methods for the
Helmholtz Equation in Three Dimensions (1st ed.). Elsevier Science.
The rotation is actually comprised of three rotations, as detailed on
page 121 and Eq. 3.3.12:
1) Rotation by a radians around z axis of the original coordinate system*
2) Rotation by b radians around the y axis of the transitionary coordinate system
3) Rotation by c radians around the z axis of the new coordinate system
* note that the formulation there actually rotates by pi-a; this script
makes that substitution so that a = 0 means no rotation (rather more intuitive!)
Optionally also returns a 3 x 3 matrix Q, which is the tensor product
between the original and transformed coordinate system unit vectors.
Arguments:
a First rotation angle in radians (real scalar)
b Second rotation angle in radians (real scalar)
c Third rotation angle in radians (real scalar)
Order Spherical Harmonic Order (non-negative real integer scalar)
"""
# Argument checking:
    ### dropped
# Allocate R:
R = np.zeros(((Order+1)**2, (Order+1)**2), dtype = np.complex128)
# Loop over SH order:
for n in np.arange(Order + 1, dtype=float):
for m1 in np.arange(-n, n + 1, dtype=float):
for m2 in np.arange(-n, n + 1, dtype=float):
# Evalute Eq. 3.3.39:
if m1 > 0:
ep1 = (-1)**m1
else:
ep1 = 1
if m2 > 0:
ep2 = (-1)**m2
else:
ep2 = 1
H = 0
for s in np.arange(max(0, -(m1+m2)), min(n-m1,n-m2) + 1):
H = H + (-1)**(n-s) * np.cos(b/2)**(2*s+m2+m1) * np.sin(b/2)**(2*n-2*s-m2-m1) / (np.math.factorial(s) * np.math.factorial(n-m1-s) * np.math.factorial(n-m2-s) * np.math.factorial(m1+m2+s))
#print(H)
H = H * ep1 * ep2 * np.sqrt(float(np.math.factorial(n+m2)*np.math.factorial(n-m2)*np.math.factorial(n+m1)*np.math.factorial(n-m1)))
#print(H)
# Evaluate Eq. 3.3.37:
R[int(sub2indSH(m2,n)), int(sub2indSH(m1,n))] = (-1)**m1 * np.exp(-1j*m1*a) * np.exp(-1j*m2*c) * H
#print((-1)**m1 * np.exp(-1j*m1*a) * np.exp(-1j*m2*c) * H)
R = scipy.sparse.csr_matrix(R)
# # Compute Q if required, using Eq. 3.3.12:
# Q1 = np.array([[np.sin(a), np.cos(a), 0], [-np.cos(a), np.sin(a), 0], [0, 0, 1]])
# Q2 = np.array([[-1, 0, 0], [0, -np.cos(b), np.sin(b)], [0, np.sin(b), np.cos(b)]])
# Q3 = np.array([[np.sin(c), np.cos(c), 0], [-np.cos(c), np.sin(c), 0], [0, 0, 1]])
# Q = Q3 * Q2 * Q1;
return R
#@jit(nopython=True)
def spherical_harmonic(n, m, alpha, sinbeta, cosbeta):
"""
(Y, dY_dbeta, dY_dalpha) = SphericalHarmonic(n, m, alpha, sinbeta, cosbeta)
    Computes a Spherical Harmonic function of order (m,n) and its angular derivatives.
Arguments - these should all be scalars:
r is radius
alpha is azimuth angle (angle in radians from the positive x axis, with
rotation around the positive z axis according to the right-hand screw rule)
beta is polar angle, but it is specified as two arrays of its cos and sin values.
m and n should be integer scalars; n should be non-negative and m should be in the range -n<=m<=n
Returned data will be vectors of length (Order+1)^2.
Associated Legendre function its derivatives for |m|:
"""
p_nm = lpmv(abs(m), n, cosbeta)
if n == 0:
dPmn_dbeta = 0
elif m == 0:
dPmn_dbeta = lpmv(1, n, cosbeta)
elif abs(m) < n:
        dPmn_dbeta = 0.5 * lpmv(abs(m) + 1, n, cosbeta) - 0.5 * (n + abs(m)) * (n - abs(m) + 1) * lpmv(abs(m) - 1, n, cosbeta)
elif (abs(m) == 1) and (n == 1):
dPmn_dbeta = -cosbeta
elif sinbeta<=np.finfo(float).eps:
dPmn_dbeta = 0
else:
dPmn_dbeta = -abs(m) * cosbeta * p_nm / sinbeta - (n + abs(m)) * (n - abs(m) + 1) * lpmv(abs(m) - 1, n, cosbeta)
# Compute scaling term, including sign factor:
scaling_term = ((-1) ** m) * np.sqrt((2 * n + 1) / (4 * np.pi * np.prod(np.float64(range(n - abs(m) + 1, n + abs(m) + 1)))))
# Compute exponential term:
exp_term = np.exp(1j * m * alpha)
# Put it all together:
y = scaling_term * exp_term * p_nm
dy_dbeta = scaling_term * exp_term * dPmn_dbeta
dy_dalpha = y * 1j * m
return (y, dy_dbeta, dy_dalpha)
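def _demo_spherical_harmonic():
    """Hedged sanity check (not part of the original module):
    Y_1^0(beta) = sqrt(3/(4*pi)) * cos(beta)."""
    beta = 0.4
    y, _, _ = spherical_harmonic(1, 0, 0.0, np.sin(beta), np.cos(beta))
    print(y, np.sqrt(3 / (4 * np.pi)) * np.cos(beta))  # should agree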
#@jit(nopython=True)
def spherical_harmonic_all (max_order, alpha, sinbeta, cosbeta):
"""
(y, dy_dbeta, dy_dalpha) = spherical_harmonic_all(max_order, alpha, sinbeta, cosbeta)
Computes a Spherical Harmonic function and it's angular derivatives for
all (m,n) up to the given maximum order. The algorithm is equivalent to that
implemented in SphericalHarmonic, but this version avoids repeated calls
to lpmv, since that is very time consuming.
Arguments - these should all be scalars:
r is radius
alpha is azimuth angle (angle in radians from the positive x axis, with
rotation around the positive z axis according to the right-hand screw rule)
beta is polar angle, but it is specified as two arrays of its cos and sin values.
max_order is maximum Spherical Harmonic order and should be a non-negative real integer scalar
Returned data will be vectors of length (max_order+1)^2.
"""
# alpha = np.array([[alpha]])
# cosbeta = np.array([[cosbeta]])
# sinbeta = np.array([[sinbeta]])
# Preallocate output arrays:
y = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
dy_dbeta = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
dy_dalpha = np.zeros((np.size(alpha),(max_order+1)**2), np.complex128)
#% Loop over n and calculate spherical harmonic functions y_nm
for n in range(max_order+1):
# Compute Legendre function and its derivatives for all m:
p_n = lpmv(range(0,n+1), n, cosbeta)
for m in range(-n, n+1):
            # Legendre function and its derivatives for |m|:
p_nm = p_n[:, np.absolute(m)]
p_nm = p_nm.reshape((np.size(p_nm), ))
if n==0:
dPmn_dbeta = 0
elif m==0:
dPmn_dbeta = p_n[:,1]
elif abs(m)<n:
                dPmn_dbeta = 0.5*p_n[:, abs(m)+1] - 0.5*(n+abs(m))*(n-abs(m)+1)*p_n[:, abs(m)-1]
dPmn_dbeta = dPmn_dbeta.reshape((np.size(dPmn_dbeta), ))
elif (abs(m)==1) and (n==1):
dPmn_dbeta = -cosbeta
dPmn_dbeta = dPmn_dbeta.reshape((np.size(dPmn_dbeta), ))
#elif sinbeta<=np.finfo(float).eps:
#dPmn_dbeta = 0
else:
dPmn_dbeta = -abs(m)*cosbeta.reshape((np.size(cosbeta), ))*p_nm/sinbeta.reshape((np.size(sinbeta), )) - (n+abs(m))*(n-abs(m)+1)*p_n[:,abs(m)-1]
dPmn_dbeta = dPmn_dbeta.reshape((np.size(dPmn_dbeta), ))
# Compute scaling term, including sign factor:
scaling_term = ((-1)**m) * np.sqrt((2 * n + 1) / (4 * np.pi * np.prod(np.float64(range(n-abs(m)+1, n+abs(m)+1)))))
# Compute exponential term:
exp_term = np.exp(1j*m*alpha)
exp_term = exp_term.reshape((np.size(exp_term), ))
# Put it all together:
i = sub2indSH(m,n)
y[:,i] = scaling_term * exp_term * p_nm
dy_dbeta[:,i] = scaling_term * exp_term * dPmn_dbeta
dy_dalpha[:,i] = y[:,i] * 1j * m
return y, dy_dbeta, dy_dalpha
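def _demo_spherical_harmonic_all():
    """Hedged consistency check (not part of the original module): the
    vectorised routine should agree with spherical_harmonic() term by term.
    Inputs are shaped (1, 1), as hinted by the commented-out reshapes above."""
    beta, alpha = 0.4, 1.1
    y_all, _, _ = spherical_harmonic_all(2, np.array([[alpha]]),
                                         np.array([[np.sin(beta)]]),
                                         np.array([[np.cos(beta)]]))
    y_one, _, _ = spherical_harmonic(2, 1, alpha, np.sin(beta), np.cos(beta))
    print(y_all[0, int(sub2indSH(1, 2))], y_one)  # should agree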
#@jit(nopython=True)
def spherical_hankel_out (n, z):
    '''
    (h, dhdz) = spherical_hankel_out(n, z)
    Computes a spherical Hankel function of the first kind ("outgoing" in the
    terminology of the reference cited above) and its first derivative.
    '''
h = spherical_jn(n,z,False) + 1j*spherical_yn(n,z,False)
dhdz = spherical_jn(n,z,True) + 1j*spherical_yn(n,z,True)
return h, dhdz
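def _demo_spherical_hankel_out():
    """Hedged sanity check (not part of the original module):
    h_0^(1)(z) = -1j * exp(1j*z) / z."""
    z = 2.5
    h, _ = spherical_hankel_out(0, z)
    print(h, -1j * np.exp(1j * z) / z)  # should agree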
#@jit(nopython=True)
def spherical_basis_out_all(k, Bnm, pos, nUV):
    '''
    (phi, dphi_dn) = spherical_basis_out_all(k, Bnm, pos, nUV)
    Returns phi and dphi/dn for a summation of outgoing Spherical Basis functions with
    coefficients Bnm. The algorithm is equivalent to that implemented in SphericalBasisOut,
    but this version avoids repeated calls to lpmv, since that is very time consuming.
    Arguments:
    k       wavenumber - positive real scalar
    Bnm     directivity coefficients - column vector with a square number of elements
    pos     evaluation position - real-valued 3-element vector
    nUV     unit vector defining the direction in which to compute dphi/dn - 1x3
    '''
    # Convert cartesian coordinates pos to spherical coordinates:
x = pos[0].reshape((1,1))
y = pos[1].reshape((1,1))
z = pos[2].reshape((1,1))
(r, alpha, sinbeta, cosbeta) = cart2sph(x,y,z)
# dot products of nUV with unit vectors of spherical coordinate system (at x):
nUVrUV, nUValphaUV, nUVbetaUV = cart2sphUV(x,y,z,nUV)
# Evaluate spherical harmonic functions and their derivatives:
#if (Bnm.ndim != 1):
# raise IndexError('Bnm must be 1-dimensional')
Order = int(np.sqrt(Bnm.size)) - 1
y, dy_dbeta, dy_dalpha = spherical_harmonic_all(Order, alpha, sinbeta, cosbeta)
# Loop over m and n and evalute phi and dPhi/dn:
phi = np.zeros((r.size,1), np.complex64)
dphi_dn = np.zeros((r.size,1), np.complex64)
for n in range(Order + 1):
R, dR_dkr = spherical_hankel_out(n, k*r)
for m in range(-n, n + 1):
i = sub2indSH(m,n)
phi += Bnm[i,0] * R * y[0,i]
dphi_dn += Bnm[i,0] * (nUVrUV * k * dR_dkr * y[0,i] + (R / r) * (nUVbetaUV * dy_dbeta[0,i] + nUValphaUV * dy_dalpha[0,i] / sinbeta))
return (phi, dphi_dn)
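def _demo_spherical_basis_out_all():
    """Hedged usage sketch (not part of the original module): a unit monopole
    (only the n=0, m=0 coefficient set) evaluated at a single field point.
    Assumes the module-level helpers cart2sph and cart2sphUV are available."""
    Bnm = np.zeros((4, 1), np.complex128)  # coefficients up to order 1
    Bnm[0, 0] = 1.0
    pos = np.array([1.0, 0.5, 0.2])
    nUV = np.array([1.0, 0.0, 0.0])
    phi, dphi_dn = spherical_basis_out_all(2 * np.pi, Bnm, pos, nUV)
    print(phi, dphi_dn)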
#@jit(nopython=True)
def spherical_basis_out_p0_only(k, Bnm, pos):
    '''
    phi = spherical_basis_out_p0_only(k, Bnm, pos)
    Returns phi (pressure only, no derivative) for a summation of outgoing
    Spherical Basis functions with coefficients Bnm. The algorithm is equivalent
    to that implemented in SphericalBasisOut, but this version avoids repeated
    calls to lpmv, since that is very time consuming.
    Arguments:
    k       wavenumber - positive real scalar
    Bnm     directivity coefficients - column vector with a square number of elements
    pos     evaluation position - real-valued 3-element vector
    '''
    # Convert cartesian coordinates pos to spherical coordinates:
x = pos[0].reshape((1,1))
y = pos[1].reshape((1,1))
z = pos[2].reshape((1,1))
(r, alpha, sinbeta, cosbeta) = cart2sph(x,y,z)
# Evaluate spherical harmonic functions and their derivatives:
#if (Bnm.ndim != 1):
# raise IndexError('Bnm must be 1-dimensional')
    Order = int(np.sqrt(Bnm.size)) - 1
    y, dy_dbeta, dy_dalpha = spherical_harmonic_all(Order, alpha, sinbeta, cosbeta)
    # Loop over m and n and evaluate phi. The remainder of this function was
    # truncated in the source; it is reconstructed here following
    # spherical_basis_out_all above, keeping only the pressure term:
    phi = np.zeros((r.size, 1), np.complex64)
    for n in range(Order + 1):
        R, dR_dkr = spherical_hankel_out(n, k*r)
        for m in range(-n, n + 1):
            i = sub2indSH(m, n)
            phi += Bnm[i, 0] * R * y[0, i]
    return phi
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60  # minutes spent polling so far
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
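# Example usage (hedged; the dataset name below appears in M2M_URLs further down,
# and the dates are illustrative only):
# data = M2M_Call('CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument',
#                 '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
# files = M2M_Files(data, r'.*METBK.*\.nc$')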
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist may eventually contain more than one URL; data from each file is appended
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # drop the leading 25 characters of the catalog path, keeping the dataset path
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        for ii in range(len(variables)):
            dum = openFile.variables[variables[ii].name]
            variables[ii].data = np.append(variables[ii].data, dum[:].data)
    # convert the time record (seconds since 1900-01-01) from seconds to days, then to datetimes
    tmp = variables[0].data/60/60/24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
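# Example usage (hedged sketch, continuing from the M2M_Files example above):
# variables, time_converted = M2M_Data(files, var_list)
# where var_list is a structtype populated by M2M_URLs below.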
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
    def __getitem__(self, index):
        """Implement index behavior in the struct: indexing one element past
        the current end appends a fresh var(), so lists can be built up by
        simple indexed assignment."""
        if index == len(self._data):
            self._data.append(var())
        return self._data[index]
def __len__(self):
return len(self._data)
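# Hedged usage note: indexing a structtype one element past its current end
# appends a fresh `var`, which is how M2M_URLs below builds its variable
# lists implicitly:
#   vl = structtype()
#   vl[0].name = 'time'      # creates element 0 on first access
#   vl[1].name = 'pressure'  # creates element 1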
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
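# Seawater pCO2 (uatm) with the sensor's thermistor temperature (degC).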
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
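# Seawater pH (unitless) with the sensor's thermistor temperature (degC).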
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
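# Spectral downwelling irradiance vector (uW cm-2 nm-1).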
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
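# Seafloor (tide) pressure (dbar) and near-bottom seawater temperature (degC).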
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
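# Pumped CTD: temperature, practical salinity, density, pressure, and conductivity.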
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
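# Seafloor point velocimeter: east/north/up turbulent velocities (m/s) plus pressure.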
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
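# Profiler-mounted point velocimeter: velocities (m/s), attitude (heading/pitch/roll), and co-located CTD pressure.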
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
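#CTDPF
# Profiler CTD: temperature, practical salinity, density, pressure, and conductivity.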
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
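# Air-sea pCO2: surface-seawater and atmospheric pCO2 (uatm) and the derived CO2 flux (mol m-2 s-1).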
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
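# Photosynthetically available radiation (umol photons m-2 s-1) with co-located CTD pressure.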
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
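# Optical absorption and attenuation meter; only the time coordinate is requested for these streams.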
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
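# SUNA nitrate: raw and salinity-corrected concentrations (umol/L). Note that the
# telemetered requests still point at the 'suna_dcl_recovered' stream name.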
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## Recovered Host datasets
#MOPAK
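# Buoy 3-axis motion package; only the time coordinate is requested for these streams.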
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
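# Bulk meteorology package: SST and conductivity, corrected winds, barometric pressure, air
# temperature, humidity, radiation, precipitation, minute-averaged heat fluxes, and surface currents.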
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
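# Three-channel fluorometer: chlorophyll-a, CDOM, volume scattering, and optical backscatter.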
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
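# FDCHP: direct-covariance air-sea flux package on the surface buoy. Only the
# timestamp is mapped for this stream in the catalog; the flux products
# themselves are not requested here.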
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
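# DOSTA: dissolved-oxygen optode (Aanderaa-style). NSIF branches expose the
# corrected dissolved_oxygen (umol/kg) alongside the raw optode estimate
# (umol/L); MFN branches are served through the co-located CTDBP (hence the
# ctdbp stream names) and carry a reduced variable set.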
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
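# ADCP: acoustic Doppler current profiler, requested as earth-referenced
# (east/north/up) seawater velocities per depth bin. Instrument attitude
# (heading/pitch/roll) is reported in raw deci-degrees (0.1 deg).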
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
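# WAVSS: surface-wave statistics from the buoy-mounted wave sensor, i.e. bulk
# parameters such as significant wave height and period, mean and peak
# periods, and mean direction/spread (heights in m, periods in sec,
# directions in degrees).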
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
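# VELPT: single-point velocity meter (Nortek Aquadopp-class). Note the raw
# engineering units on these streams: attitude in deci-degrees, temperature
# in 0.01 degC, and pressure in 0.001 dbar even though the parameter itself
# is named pressure_mbar.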
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
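# PCO2W: in-water pCO2 sensor (SAMI-CO2-class), returning the instrument
# thermistor temperature (degC) and seawater pCO2 in microatmospheres.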
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
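# PHSEN: spectrophotometric seawater-pH sensor (SAMI-pH-class). pH is a
# dimensionless quantity, hence the 'unitless' tag on the data product.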
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
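# SPKIR: multispectral downwelling irradiance radiometer. The single data
# variable packs the per-wavelength channels into one array, with units of
# uW cm-2 nm-1.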
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
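# PRESF: seafloor pressure (tide) recorder on the MFN, returning absolute
# bottom pressure (dbar) and in-situ seawater temperature (degC).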
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
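# CTDBP: pumped CTD. Branches return temperature, practical salinity
# (dimensionless), density, pressure, and conductivity; the BUOY/NSIF/MFN
# placements differ only in reference designator and stream name.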
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
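#CTDBP (RecoveredInst)
# Note: the branches below repeat the RecoveredHost CTDBP pattern above,
# differing only in the reference-designator path, stream name, and telemetry
# method. A minimal, hypothetical sketch of a table-driven alternative — the
# names STREAM_TABLE and fields are illustrative only, not part of this module:
#
#   STREAM_TABLE = {
#       ('CE01ISSM', 'NSIF', 'CTD', 'RecoveredInst'): (
#           'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered',
#           [('time', 'seconds since 1900-01-01'),
#            ('ctdbp_seawater_temperature', 'degC')]),
#   }
#   uframe_dataset_name, fields = STREAM_TABLE[(platform_name, node, instrument_class, method)]
#   for i, (name, units) in enumerate(fields):
#       var_list[i].name, var_list[i].data, var_list[i].units = name, np.array([]), units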
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
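#CTDPF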
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
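#ADCP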
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
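#ZPLSC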
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
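#VELPT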
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
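#VEL3D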
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
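# PRESF: seafloor pressure (tide) measurements on the MFN nodes.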
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
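# PHSEN: seawater pH, reported alongside the sensor's thermistor temperature.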
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
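# PCO2W: partial pressure of CO2 in seawater.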
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
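# PARAD: photosynthetically available radiation (PAR) on the wire-following profiler.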
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
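# NUTNR: nitrate from the SUNA, both raw and salinity-corrected concentrations.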
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
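# FDCHP: direct-covariance air-sea flux package; only the time base is mapped here.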
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
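# FLORT: three-channel fluorometer (chlorophyll-a, CDOM, optical backscatter).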
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
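# Dissolved oxygen: DOFST-K on the wire-following profiler, DOSTA co-located
# with the CTDBP on the fixed moorings (oxygen is delivered via the CTD stream).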
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
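# Note: on the ISSM MFN nodes the WAVSS_Stats class is served by the ADCPT-M
# wave statistics (log9) stream rather than a dedicated surface wave sensor.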
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
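# Cabled benthic experiment package (BEP) instruments, streamed in real time.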
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
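# On the cabled BEPs dissolved oxygen comes through the co-located CTDBP-NO,
# so the DOSTA class points at the same ctdbp_no_sample stream as the CTD.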
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
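# A minimal usage sketch (hypothetical helper and variable names): once this
# chain has selected a stream, the uFrame request would be issued and the
# returned record copied into the pre-declared var_list entries, e.g.
#   data = request_uframe_data(uframe_dataset_name, start_time, end_time)
#   for var in var_list:
#       var.data = np.array(data[var.name])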
# CSPP (Coastal Surface-Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
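### PARAD (photosynthetically available radiation) on the CSPPs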
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
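### NUTNR (nitrate) on the CSPPs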
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
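### SPKIR (downwelling spectral irradiance) on the CSPPs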
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
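### VELPT (single-point velocity meter) on the CSPPs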
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
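### OPTAA (spectrophotometer) on the CSPPs. Only time and the interpolated
### CTD pressure are mapped here; the multi-wavelength absorption and
### attenuation arrays are not enumerated in this table.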
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
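### CE05MOAS coastal glider datasets. The CEGL* branches below differ only in
### the glider number embedded in the reference designator (GL386, GL384,
### GL383, GL382, GL381, GL327, GL326, GL320, GL319, GL312, GL311, GL247)
### and in the telemetered vs. recovered_host method/stream suffix, starting
### with the glider CTD (CTDGV). A table-driven sketch of the same mapping
### (hypothetical, not part of this module; platform_name is e.g. 'CEGL386'):
###
###     streams = {'Telemetered': ('telemetered',
###                                'ctdgv_m_glider_instrument'),
###                'RecoveredHost': ('recovered_host',
###                                  'ctdgv_m_glider_instrument_recovered')}
###     method_dir, stream = streams[method]
###     uframe_dataset_name = 'CE05MOAS/GL%s/05-CTDGVM000/%s/%s' % (
###         platform_name[4:], method_dir, stream)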
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
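### Glider DOSTA (dissolved oxygen): raw optode oxygen (sci_oxy4_oxygen,
### umol/L), derived absolute oxygen (sci_abs_oxygen, umol/kg), interpolated
### CTD pressure, and the glider GPS position.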
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
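### Glider FLORT (WET Labs ECO 'FLBBCD' triplet): chlorophyll, CDOM, and
### optical backscatter channels plus derived scattering coefficients,
### interpolated CTD pressure, and the glider GPS position.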
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
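# CE05MOAS glider PARAD (parad_m) streams: PAR irradiance plus interpolated
# CTD pressure and GPS position; the telemetered and recovered_host branches
# below repeat the same five-variable pattern for each glider.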
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
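# CE05MOAS glider ADCP (adcp_velocity_glider) streams, recovered_host only:
# each branch carries binned velocity profiles (eastward/northward/upward),
# attitude (heading, pitch, roll in deci-degrees), pressure, and position.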
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
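# Surface-mooring hourly METBK bulk-flux products (metbk_hourly): rain rate,
# buoyancy/heat/freshwater/momentum fluxes, near-surface state variables,
# and hourly net shortwave irradiance, keyed on met_timeflx.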
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
        var_list[15].data = np.array([])
        var_list[16].data = np.array([])
        var_list[0].units = 'seconds since 1900-01-01'
        var_list[1].units = 'mm/hr'
        var_list[2].units = 'W/m2'
        var_list[3].units = 'W/m2'
        var_list[4].units = 'mm/hr'
        var_list[5].units = 'W/m2'
        var_list[6].units = 'W/m2'
        var_list[7].units = 'N/m2'
        var_list[8].units = 'W/m2'
        var_list[9].units = 'W/m2'
        var_list[10].units = 'W/m2'
        var_list[11].units = 'g/kg'
        var_list[12].units = 'unitless'
        var_list[13].units = 'degC'
        var_list[14].units = 'degC'
        var_list[15].units = 'm/s'
        var_list[16].units = 'W/m2'
__copyright__ = """
Copyright (C) 2020 <NAME>
Copyright (C) 2020 <NAME>
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numpy as np
import pytest
from pydemic.models import SEIRModelSimulation
@pytest.mark.parametrize("total_pop", [1e4, 1e6])
@pytest.mark.parametrize("avg_infection_rate", [12, 8])
def test_SEIR(total_pop, avg_infection_rate, infectious_rate=1,
removal_rate=1):
sim = SEIRModelSimulation(avg_infection_rate, infectious_rate, removal_rate)
compartments = ('susceptible', 'exposed', 'infectious', 'removed')
y0 = {
'susceptible': np.array(total_pop-1),
'exposed': np.array(1),
'infectious': np.array(0),
'removed': np.array(0),
}
tspan = (0, 10)
dt = 1e-3
result = sim(tspan, y0, dt)
t = result.t
def f(t, y):
S, E, I, R = y
S_to_E = avg_infection_rate * S * I / total_pop
E_to_I = infectious_rate * E
I_to_R = removal_rate * I
dydt = [-S_to_E, S_to_E - E_to_I, E_to_I - I_to_R, I_to_R]
return np.array(dydt)
initial_position = [total_pop-1, 1, 0, 0]
from scipy.integrate import solve_ivp
res = solve_ivp(f, tspan, initial_position, rtol=1.e-13, method='DOP853',
dense_output=True)
true_sol = {comp: res.sol(t)[i] for i, comp in enumerate(compartments)}
for i, name in enumerate(compartments):
non_zero = np.logical_and(true_sol[name] > 0, result.y[name] > 0)
test = np.logical_and(non_zero, t > 1)
relerr = np.abs(1 - true_sol[name][test] / result.y[name][test])
print('max err for', name, 'is', np.max(relerr))
assert np.max(relerr) < .05
total_people = sum(result.y[name] for name in compartments)
total_err = np.max(np.abs(1 - total_people / total_pop))
print('total error is', np.max(total_err))
    assert np.max(total_err) < .05  # tolerance assumed to match the per-compartment check above
#!/usr/bin/env python #
# ------------------------------------------------------------------------------------------------------#
# Created by "<NAME>" at 16:32, 07/12/2019 #
# #
# Email: <EMAIL> #
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 #
# Github: https://github.com/thieunguyen5991 #
#-------------------------------------------------------------------------------------------------------#
import numpy as np
class Functions:
"""
    These are multi-modal benchmark functions; all of them scale to n-dimensional search spaces
"""
def _ackley_1__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-separable, Scalable,
Global: one global minimum fx = 0, at [0, 0,...0]
@param solution: A numpy array like with x_i in [-35, 35]
@return: fx
"""
return -20*np.exp(-0.02*np.sqrt(np.sum(solution**2)/len(solution))) - \
np.exp(np.sum(np.cos(2*np.pi*solution))/len(solution)) + 20 + np.e
def _ackley_4__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-Separable, Scalable
        Global: 2 global minima
@param solution: A numpy array like with x_i in [-35, 35]
@return: fx
"""
        d = len(solution)
        result = 0
        for i in range(0, d - 1):
            result += np.exp(-0.2*np.sqrt(solution[i]**2 + solution[i+1]**2)) + 3*(np.cos(2*solution[i]) + np.sin(2*solution[i+1]))
        return result
def _alpine_1__(self, solution=None):
"""
Class: Continuous, Non-Differentiable, Separable, Non-Scalable
Global: 1 global minimum, fx = 0, at [0, ..., 0]
@param solution: A numpy array like with x_i in [-10, 10]
@return: fx
"""
        return np.sum(np.abs(solution * np.sin(solution) + 0.1 * solution))
def _alpine_2__(self, solution=None):
"""
Class: Continuous, Differentiable, Separable, Scalable
        Global: 1 global maximum, fx = 2.808^D, at [7.917, ..., 7.917]
@param solution: A numpy array like with x_i in [0, 10]
@return: fx
"""
return np.prod(np.sqrt(solution)*np.sin(solution))
def _cosine_mixture__(self, solution=None):
"""
Class: Discontinuous, Non-Differentiable, Separable, Scalable
Global:
@param solution: A numpy array like with x_i in [-1, 1]
@return: fx
"""
return -0.1*np.sum(np.cos(5*np.pi*solution)) - np.sum(solution**2)
def _csendes__(self, solution=None):
"""
Class: Continuous, Differentiable, Separable, Scalable
Global: fx = 0, at [0, ...,0]
@param solution: A numpy array like with x_i in [-1, 1]
@return: fx
"""
return np.sum(solution**6*(2+np.sin(1.0/solution)))
def _deb_1__(self, solution=None):
"""
Class: Continuous, Differentiable, Separable, Scalable
Global:
@param solution: A numpy array like with x_i in [-1, 1]
@return: fx
"""
return -np.sum(np.sin(5*np.pi*solution)**6) / len(solution)
def _deb_3__(self, solution=None):
"""
Class: Continuous, Differentiable, Separable, Scalable
Global:
        @param solution: A numpy array like with x_i in [0, 1]
@return: fx
"""
return -np.sum(np.sin(5*np.pi*(solution**0.75 - 0.05))**6) / len(solution)
def _egg_holder__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-Separable, Scalable
Global:
@param solution: A numpy array like with x_i in [-512, 512]
@return: fx
"""
        d = len(solution)
        result = 0
        for i in range(0, d - 1):
            result += -(solution[i+1]+47) * np.sin(np.sqrt(np.abs(solution[i+1] + solution[i] / 2 + 47))) -\
                      solution[i]*np.sin(np.sqrt(np.abs(solution[i] - solution[i+1] - 47)))
        return result
def _exponential__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-Separable, Scalable
        Global: one global minimum fx = -1, at [0,...,0]
@param solution: A numpy array with x_i in [-1, 1]
@return: fx
"""
        return -np.exp(-0.5*np.sum(solution**2))
def _griewank__(self, solution=None):
"""
        Class: multi-modal, non-convex, continuous
Global: one global minimum fx = 0, at [0, ..., 0]
@param solution: A numpy array with x_i in [-600, 600]
@return: fx
"""
d = len(solution)
result = 1 + np.sum(solution**2) / 4000
prod = 1.0
for i in range(0, d):
prod *= np.cos(solution[i]/np.sqrt(i+1))
return result - prod
def _mishra_1__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-Separable, Scalable
Global: one global minimum fx = 2
@param solution: A numpy array with x_i in [0, 1]
@return: fx
"""
d = len(solution)
result = d - np.sum(solution[:d-1])
return (1+result)**result
def _mishra_2__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-Separable, Scalable
Global: one global minimum fx = 2
@param solution: A numpy array with x_i in [0, 1]
@return: fx
"""
d = len(solution)
result = 0
for i in range(0, d-1):
result += 0.5*(solution[i]+solution[i+1])
result = d - result
return (1+result)**result
def _mishra_7__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-Separable, Non-Scalable
Global: one global minimum fx = 0
@param solution: A numpy array
@return: fx
"""
return (np.prod(solution) - np.math.factorial(len(solution)))**2
def _mishra_11__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-Separable, Non-Scalable
Global: one global minimum fx = 0
@param solution: A numpy array
@return: fx
"""
d = len(solution)
return (np.sum(np.abs(solution))/d - (np.prod(np.abs(solution)))**(1/d))**2
def _pathological__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-Separable, Non-Scalable
Global: one global minimum fx = 0, at [0, ..., 0]
@param solution: A numpy array with x_i in [-100, 100]
@return: fx
"""
d = len(solution)
result = 0
for i in range(0, d-1):
result += 0.5 + ( np.sin(np.sqrt(100*solution[i]**2 + solution[i+1]**2))**2 -0.5 ) / \
(1 + 0.001*(solution[i]**2 - 2*solution[i]*solution[i+1] + solution[i+1]**2)**2)
return result
def _pinter__(self, solution=None):
"""
Class: Continuous, Differentiable, Non-separable, Scalable
Global: global minimum fx = 0, at [0, ..., 0]
@param solution: A numpy array with x_i in [-10, 10]
@return: fx
"""
d = len(solution)
result1 = 0
result2 = 0
result3 = 0
for i in range(0, d):
result1 += (i+1)*solution[i]**2
            if i==0:
                # cyclic boundary x_{i-1} = x_{d-1} assumed, per Pinter's definition
                result2 += 20*(i+1)*np.sin(solution[d-1]*np.sin(solution[i]) + np.sin(solution[i+1]))**2
'''
File name: PointNet Definition
Author: minhnc
Date created(MM/DD/YYYY): 10/2/2018
Last modified(MM/DD/YYYY HH:MM): 10/2/2018 5:25 AM
Python Version: 3.6
Other modules: [None]
Reference for loss: https://gist.github.com/wassname/ce364fddfc8a025bfab4348cf5de852d#file-keras_weighted_categorical_crossentropy-py
Copyright = Copyright (C) 2017 of NGUY<NAME>
Credits = [None] # people who reported bug fixes, made suggestions, etc. but did not actually write the code
License = None
Version = 0.9.0.1
Maintainer = [None]
Email = <EMAIL>
Status = Prototype # "Prototype", "Development", or "Production"
Code Style: http://web.archive.org/web/20111010053227/http://jaynes.colorado.edu/PythonGuidelines.html#module_formatting
'''
#==============================================================================
# Imported Modules
#==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import time
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0" # The GPU id to use, usually either "0" or "1"
import numpy as np
import tensorflow as tf
import keras
from keras.layers import Input
from keras.models import Model
from keras.layers import Dense, Reshape, LSTM
from keras.layers import Convolution1D, MaxPooling1D, AveragePooling1D, GlobalMaxPooling1D, BatchNormalization, Activation, Flatten, Dropout
from keras.layers import Lambda, concatenate
from keras.regularizers import l2
#==============================================================================
# Constant Definitions
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def exp_dim0(global_feature, axis):
return tf.expand_dims(global_feature, axis)
def exp_dim(global_feature, num_points):
return tf.tile(global_feature, [1, num_points, 1])
def PointNet(num_points, num_classes):
'''
inputs:
num_points: integer > 0, number of points for each point cloud image
num_classes: total numbers of segmented classes
outputs:
onehot encoded array of classified points
'''
'''
Begin defining Pointnet Architecture
'''
input_points = Input(shape=(num_points, 3))
x = Convolution1D(64, 1, activation='relu',
input_shape=(num_points, 3))(input_points)
x = BatchNormalization()(x)
x = Convolution1D(128, 1, activation='relu')(x)
x = BatchNormalization()(x)
x = Convolution1D(1024, 1, activation='relu')(x)
x = BatchNormalization()(x)
# x = MaxPooling1D(pool_size=num_points)(x)
x = GlobalMaxPooling1D()(x)
x = Dense(512, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(256, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(9, weights=[np.zeros([256, 9]), np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32)])(x)
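    # the Dense layer above uses a zero kernel and a flattened identity bias,
    # so the learned input transform starts out as the 3x3 identity mapping
    # (the 64x64 feature transform below is initialised the same way)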
input_T = Reshape((3, 3))(x)
## forward net
g = keras.layers.dot(inputs=[input_points, input_T], axes=2)
g = Convolution1D(64, 1, input_shape=(num_points, 3), activation='relu')(g)
g = BatchNormalization()(g)
g = Convolution1D(64, 1, input_shape=(num_points, 3), activation='relu')(g)
g = BatchNormalization()(g)
## feature transformation net
f = Convolution1D(64, 1, activation='relu')(g)
f = BatchNormalization()(f)
f = Convolution1D(128, 1, activation='relu')(f)
f = BatchNormalization()(f)
f = Convolution1D(1024, 1, activation='relu')(f)
f = BatchNormalization()(f)
# f = MaxPooling1D(pool_size=num_points)(f)
f = GlobalMaxPooling1D()(f)
f = Dense(512, activation='relu')(f)
f = BatchNormalization()(f)
f = Dense(256, activation='relu')(f)
f = BatchNormalization()(f)
    f = Dense(64 * 64, weights=[np.zeros([256, 64 * 64]), np.eye(64).flatten().astype(np.float32)])(f)
# Utility Functions
# Authors: <NAME>
# Edited by: <NAME>
'''
Used by the user to define channels that are hard coded for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
stringtest = patient.find('seiz')
if stringtest == -1:
stringtest = patient.find('sz')
if stringtest == -1:
stringtest = patient.find('aw')
if stringtest == -1:
stringtest = patient.find('aslp')
if stringtest == -1:
stringtest = patient.find('_')
if stringtest == -1:
print("Not sz, seiz, aslp, or aw! Please add additional naming possibilities, or tell data gatherers to rename datasets.")
else:
pat_id = patient[0:stringtest]
seiz_id = patient[stringtest:]
# remove any underscores
pat_id = re.sub('_', '', pat_id)
seiz_id = re.sub('_', '', seiz_id)
return pat_id, seiz_id
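# Usage sketch (the label below is illustrative, not from the datasets):
# splitpatient('pt1_sz2') -> ('pt1', 'sz2'), with underscores stripped from
# both halves.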
def returnindices(pat_id, seiz_id=None):
included_indices, onsetelecs, clinresult = returnnihindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnlaindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnummcindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnjhuindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returntngindices(
pat_id, seiz_id)
return included_indices, onsetelecs, clinresult
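# returnindices cascades through the site-specific lookup tables (NIH, LA,
# UMMC, JHU, TNG) and stops at the first one that returns a non-empty index
# array for the given patient.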
def returntngindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'id001ac':
# included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
# np.arange(56,77), np.arange(78,80)))
included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
elif pat_id == 'id002cj':
# included_indices = np.array(np.arange(0,184))
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
30, 31, 32, 33, 34, 35, 36, 37, 38,
45, 46, 47, 48, 49, 50, 51, 52, 53,
60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
106, 107, 108, 115, 116, 117, 118, 119,
120, 121, 122, 123, 129, 130, 131, 132, 133,
134, 135, 136, 137,
# np.arange(143, 156)
143, 144, 145, 146, 147,
148, 149, 150, 151, 157, 158, 159, 160, 161,
162, 163, 164, 165, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182])
elif pat_id == 'id003cm':
included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
np.arange(40,50), np.arange(55,69), np.arange(70,79)))
elif pat_id == 'id004cv':
# removed OC'10, SC'5, CC'14/15
included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
np.arange(40,59), np.arange(60,110)))
elif pat_id == 'id005et':
included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
np.arange(52,62), np.arange(62,87)))
elif pat_id == 'id006fb':
included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
np.arange(115,123)))
elif pat_id == 'id008gc':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
elif pat_id == 'id009il':
included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
elif pat_id == 'id010js':
included_indices = np.concatenate((np.arange(0,14),
np.arange(15,29), np.arange(30,42), np.arange(43,52),
np.arange(53,65), np.arange(66,75), np.arange(76,80),
np.arange(81,85), np.arange(86,94), np.arange(95,98),
np.arange(99,111),
np.arange(112,124)))
elif pat_id == 'id011ml':
included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
np.arange(69,82), np.arange(82,125)))
elif pat_id == 'id012pc':
included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
np.arange(18,28), np.arange(31,41), np.arange(44,56),
np.arange(57,69), np.arange(70,82), np.arange(83,96),
np.arange(97,153)))
elif pat_id == 'id013pg':
included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
76, 77, 78])
elif pat_id == 'id014rb':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164])
elif pat_id == 'id015sf':
included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
np.arange(78,121)))
return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
np.arange(
20, 25), np.arange(
26, 37), np.arange(
42, 44),
np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
                                           np.arange(28, 36)))  # remaining pt4 index ranges truncated in the source
from scipy import ndimage
import tensorflow as tf
from spatial_transformer import AffineVolumeTransformer
import numpy as np
import scipy.misc
import binvox_rw
import sys
def read_binvox(f):
class Model:
pass
model = Model()
line = f.readline().strip()
if not line.startswith(b'#binvox'):
raise IOError('Not a binvox file')
model.dims = list(map(int, f.readline().strip().split(b' ')[1:]))
model.translate = list(map(float, f.readline().strip().split(b' ')[1:]))
model.scale = float(f.readline().strip().split(b' ')[1])
_ = f.readline()
raw_data = np.frombuffer(f.read(), dtype=np.uint8)
values, counts = raw_data[::2], raw_data[1::2]
# xzy (binvox) -> zyx (tensorflow)
    model.data = np.transpose(np.repeat(values, counts).astype(bool).reshape(model.dims), (1,2,0))
# zxy -> zyx (should all be equal, so doesn't matter)
model.dims = [model.dims[i] for i in [0,2,1]]
return model
def write_binvox(model, f):
f.write(b'#binvox 1\n')
f.write(('dim '+' '.join(map(str, [model.dims[i] for i in [0,2,1]]))+'\n').encode())
f.write(('translate '+' '.join(map(str, model.translate))+'\n').encode())
    f.write(('scale '+str(model.scale)+'\n').encode())
f.write(b'data\n')
# zyx (tensorflow) -> xzy (binvox)
voxels = np.transpose(model.data, (2, 0, 1)).flatten()
# run length encoding
value = voxels[0]
count = 0
def dump():
if sys.version_info[0] < 3:
# python 2
f.write(chr(value))
f.write(chr(count))
else:
# python 3
f.write(bytes((value,)))
f.write(bytes((count,)))
for curval in voxels:
if curval==value:
count += 1
if count==255:
dump()
count = 0
else:
dump()
value = curval
count = 1
if count > 0:
dump()
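# Round-trip sketch (the copy path is illustrative): read_binvox and
# write_binvox are inverses up to the run-length encoding, so a voxel model
# can be copied with:
#   with open('data/model.binvox', 'rb') as f_in:
#       m = read_binvox(f_in)
#   with open('data/copy.binvox', 'wb') as f_out:
#       write_binvox(m, f_out)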
# Input image retrieved from:
# https://raw.githubusercontent.com/skaae/transformer_network/master/cat.jpg
with open('data/model.binvox', 'rb') as f:
model = read_binvox(f)
vol = model.data.copy().astype(np.float32)
pad_size = 12
vol = np.pad(vol, pad_width=[[pad_size,pad_size], [pad_size,pad_size], [pad_size,pad_size]], mode='constant')
model.dims = (np.array(model.dims) + 2*pad_size).tolist()
# input batch
batch_size = 3
batch = np.expand_dims(vol, axis=3)
batch = np.expand_dims(batch, axis=0)
batch = np.tile(batch, [batch_size, 1, 1, 1, 1])
# input placeholder
# depth, height, width, in_channels
x = tf.placeholder(tf.float32, [batch_size, vol.shape[0], vol.shape[1], vol.shape[2], 1])
outsize = (int(vol.shape[0]), int(vol.shape[1]), int(vol.shape[2]))
# Affine Transformation Layer
stl = AffineVolumeTransformer(outsize)
theta = tf.placeholder(tf.float32, [batch_size, stl.param_dim])
# Identity transformation parameters
initial = np.array([1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0 ]).astype('float32')
initial = np.reshape(initial, [1, stl.param_dim])
# x-axis-rot, y-axis-rot, z-axis-rot
def transmat(phi, theta, psi, shiftmat=None):
batch_size = phi.shape[0]
    assert batch_size==theta.shape[0] and batch_size==psi.shape[0], 'must have same number of angles for x, y and z axes'
assert phi.ndim==1 and theta.ndim==1 and psi.ndim==1, 'must be 1 dimensional array'
if shiftmat is None:
shiftmat = np.zeros([batch_size,3,1])
    rotmat = np.zeros([batch_size, 3, 3])
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from decorator import decorator
# Moving Average
def MA(ds, n):
MA = pd.Series(ds.rolling(n).mean(), name = 'MA_' + str(n))
return MA
# difference between short MA and long MA
def diffMA(ds, l=60, s=5):
"""
ds: dataset is pandas data series
"""
ma_l = ds.rolling(l, min_periods=l).mean()
ma_s = ds.rolling(s, min_periods=s).mean()
return (ma_s/ma_l)-1
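# e.g. diffMA(prices, l=60, s=5) is positive while the 5-day MA sits above
# the 60-day MA (a bullish crossover signal) and negative otherwise.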
# Linear Regression
import statsmodels.formula.api as smf
def liner_regression(x,y):
model = smf.OLS(y,x)
results = model.fit()
b = results.params
R = results.rsquared
pvalue = results.pvalues
t='Y=%0.4fX --- R2=%0.2f%% --- p-value=%0.4f' %(b[0], R*100, pvalue[0])
return b,t
# slope of MA
def slopeMA(ds, m=60, dw=5):
ma = ds.rolling(m, min_periods=1).mean()
slope = ma.copy()
x = np.arange(1,dw+1)/100.0
for t in range(dw,len(slope)):
y = ma[t-dw+1:t+1] / ma[t-dw+1:t+1].mean() - 1
slope[t], _ = liner_regression(x,y)
return slope
# garch
def addGARCH(ds, hln=200):
ts = 100*ds.to_returns().dropna()
hts = ts[:hln].values
var = []
# rolling estimate var
while (len(hts)<len(ts)):
f_var, _ = forecast_var_from_garch(hts[-hln:])
var.append(f_var)
hts = np.append(hts, ts.iloc[len(hts)])
print(max(var), min(var))
var = np.append(np.zeros([len(ds)-len(var),1]), var)
return var
# historical var
def addVAR(ds, hln=200):
ts = 100*ds.to_returns().dropna()
hts = ts[:hln].values
var = []
# rolling estimate var
while (len(hts)<len(ts)):
f_var, _ = forecast_var_from_constant_mean(hts[-hln:])
var.append(f_var)
hts = np.append(hts, ts.iloc[len(hts)])
#print(max(var), min(var))
var = np.append(np.zeros([len(ds)-len(var),1]), var)
return var
# historical cov
def addCOV(ds1, ds2, hln=200):
ts1 = ds1.to_returns().dropna().values
ts2 = ds2.to_returns().dropna().values
cov = []
#cov.append(np.nan) # add 1 when dropna at prices->returns
for t in range(hln):
cov.append(np.nan)
for t in range(hln, len(ts1)+1):
f_cov = np.cov(ts1[t-hln:t], ts2[t-hln:t])
cov.append(f_cov[0][1]*10000)
return cov
# Seek Best Garch Model
import statsmodels.tsa.api as smt
def seek_garch_model(TS):
"""
TS is returns of a price-series
numpy array or array
# Seek Best GARCH Model
res_tup = seek_garch_model(ts)
order = res_tup[1]
p_ = order[0]
o_ = order[1]
q_ = order[2]
# Using student T distribution usually provides better fit
am = arch_model(ts, p=p_, o=o_, q=q_, dist='StudentsT')
res = am.fit(update_freq=5, disp='off')
fig = res.plot(annualize='D')
print(res.summary())
ts_plot(res.resid, lags=30)
"""
best_aic = np.inf
best_order = None
best_mdl = None
pq_rng = range(5) # [0,1,2,3,4]
d_rng = range(2) # [0,1]
for i in pq_rng:
for d in d_rng:
for j in pq_rng:
try:
tmp_mdl = smt.ARIMA(TS, order=(i,d,j)).fit(
method='mle', trend='nc'
)
tmp_aic = tmp_mdl.aic
if tmp_aic < best_aic:
best_aic = tmp_aic
best_order = (i, d, j)
best_mdl = tmp_mdl
except: continue
print('aic: {:6.5f} | order: {}'.format(best_aic, best_order))
return best_aic, best_order, best_mdl
#under arch model scheme
@decorator
def forecast_var(model_est_var, *args, **kwargs):
"""
Use historical data (0 to t) to forecast variance at t+1
via the model (defined in arch)
Args:
* args[0]: returns (numpy array or array): Returns for security.
Returns:
forecast variance: float
residuals: array
"""
if len(args)<1:
raise Exception("Not Enough Parameters")
m = model_est_var(*args, **kwargs)
res = m.fit(update_freq=5, disp='off')
return res.forecast().variance.values[-1][0], res.resid
from arch.univariate import ConstantMean
@forecast_var
def forecast_var_from_constant_mean(returns):
"""
returns is historical returns
"""
return ConstantMean(returns)
from arch import arch_model
@forecast_var
def forecast_var_from_garch(returns):
"""
returns is historical returns
"""
return arch_model(returns, vol='Garch', p=1, o=0, q=1, dist='Normal')
@forecast_var
def forecast_var_from_best(returns):
"""
returns is historical returns
"""
from pyetf.algos import seek_garch_model
from arch import arch_model
res_tup = seek_garch_model(returns)
order = res_tup[1]
p_ = order[0]
o_ = order[1]
q_ = order[2]
return arch_model(returns, p=p_, o=o_, q=q_, dist='StudentsT')
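# Usage sketch: each decorated forecaster takes a 1-D array of historical
# (percentage) returns and yields the one-step-ahead variance forecast plus
# the fitted residuals, e.g.
#   f_var, resid = forecast_var_from_garch(hist_returns)
# which is exactly how addVAR/addGARCH above consume these functions.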
# future mean and var
def future_mean_var(p, negative=False):
"""
    p is a numpy array of prices over the next m dates
    negative is True: only use dates where p(t) < p(0)
    negative is False: use all p(t)
    """
m = len(p)
dr = []
if negative:
for d in range(1,m):
if p[d]<p[0]:
dr.append((p[d]/p[0])**(1/d)-1)
if len(dr) == 0:
dr.append(0.)
else:
for d in range(1,m):
dr.append((p[d]/p[0])**(1/d)-1)
mean = np.mean(dr)
var = np.var(dr)
return mean, var
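# Worked sketch: for p = [100, 101, 102.01] the per-horizon geometric rates
# are (101/100) - 1 = 1% and (102.01/100)**(1/2) - 1 = 1%, so mean ~ 0.01
# and var ~ 0.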
# future mean and var
def future_covar(p1, p2=None):
"""
    p1 and p2 are numpy arrays of prices over the future fm(30) dates
    plus the historical hm(200-fm) dates
    p1 = p2: calculates the variance
"""
r1 = np.diff(p1)/p1[0:len(p1)-1]
if p2 is None:
return np.var(r1)
else:
        r2 = np.diff(p2)/p2[0:len(p2)-1]
return np.cov(r1, r2)
# under keras model scheme
def strucutre_keras_model(train_model, addFeatures, addTarget, prices, prices_two=None, model_path="\\keras_model\\"):
"""
* prices: pandas series (or dataframe) with date index and prices
* function will save model estimated by keras
to a h5 file named 'est_var(_ticker_).h5'
* load model
from keras.models import load_model
model_load = load_model('est_var(_ticker_).h5')
"""
# 1. Data Process
if prices_two is None:
# 1.1 initial data
dataset, model_filename = initData(prices, model_path)
# 1.2 process data
x_dataset, y_dataset = processData(addFeatures, addTarget, dataset)
else:
dataset, model_filename = initData_two(prices, prices_two, model_path)
x_dataset, y_dataset = processData_two(addFeatures, addTarget, dataset)
# 1.3 split train set and test set
x_train, y_train, x_test, y_test = splitDataset(x_dataset, y_dataset)
# 1.4 shuttle train set
x_train, y_train = shuffleDataset(x_train, y_train)
# 2. Build Model
# 2.1 setup model
# 2.2 train model
model = train_model(x_train, y_train)
# 2.3 save model
model.save(model_filename)
# 3 evaluation
trainScore = model.evaluate(x_train, y_train)
testScore = model.evaluate(x_test, y_test)
print(f"Train Score Loss: {trainScore[0]:0.4f}")
print(f"Test Score Loss: {testScore[0]:0.4f}")
# 4. Plot Results
plt.figure(figsize=(10, 8))
#plt.plot(y_dataset)
#plt.plot(y_predict)
plt.plot(y_test)
plt.plot(model.predict(x_test))
plt.show()
from keras.models import load_model
def load_keras_model(prices, model_path="\\keras_model\\"):
# 1. Data Process
# 1.1 initial data
dataset, model_filename = initData(prices, model_path)
model = load_model(model_filename)
return dataset, model
# stucture X and Y from dataset
def buildXY(dataset, pastDays=30):
"""
Result -> numpy
"""
m = pastDays
x_dataset = dataset.drop(columns='y').values
y_dataset = dataset['y'].values
dataX, dataY = [], []
for t in range(0, len(dataset)-m+1):
dataX.append(x_dataset[t:(t+m)])
dataY.append(y_dataset[t+m-1])
return np.array(dataX), np.array(dataY)
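# Shape note: with len(dataset) == T and pastDays == m, buildXY returns X
# with shape (T - m + 1, m, n_features) and y with shape (T - m + 1,),
# pairing each m-day window with the target on its last day.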
# stucture X from dataset to forecast
def buildX(dataset, pastDays=30):
"""
Result -> numpy
"""
m = pastDays
x_dataset = dataset.values
dataX = []
for t in range(0, len(dataset)-m+1):
dataX.append(x_dataset[t:(t+m)])
return np.array(dataX)
# normalize dataset
from sklearn.preprocessing import MinMaxScaler
def normalise_windows(window_data):
scaler = MinMaxScaler(feature_range=(0, 1))
normalised_data = []
for window in window_data:
normalised_window = scaler.fit_transform(window)
normalised_data.append(normalised_window)
return normalised_data
# split dataset to train and test
def splitDataset(x_dataset, y_dataset, train_size_ratio=0.6):
train_size = int(len(x_dataset) * train_size_ratio)
x_train, x_test = x_dataset[0:train_size], x_dataset[train_size:len(x_dataset)]
y_train, y_test = y_dataset[0:train_size], y_dataset[train_size:len(y_dataset)]
return np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test)
# random train dataset
def shuffleDataset(x, y):
np.random.seed(10)
randomList = np.arange(x.shape[0])
np.random.shuffle(randomList)
return x[randomList], y[randomList]
# initial Data and model name
def initData(prices, model_path, model_name='est_var'):
if isinstance(prices, pd.core.series.Series):
e = prices.name
dataset = pd.DataFrame(prices)
else:
e = prices.columns[0]
dataset = prices.copy()
print(f"{e}")
dataset = dataset.rename({e:'price'}, axis=1)
model_path = os.getcwd() + model_path
model_filename = model_path + model_name + '(' + e + ').h5'
return dataset, model_filename
# initial Data and model name
def initData_two(prices_one, prices_two, model_path, model_name='est_cov'):
if isinstance(prices_one, pd.core.series.Series):
e1 = prices_one.name
dataset = pd.DataFrame(prices_one)
else:
e1 = prices_one.columns[0]
dataset = prices_one.copy()
dataset = dataset.rename({e1:'price_one'}, axis=1)
if isinstance(prices_two, pd.core.series.Series):
e2 = prices_two.name
dataset[e2] = pd.DataFrame(prices_two)
else:
e2 = prices_two.columns[0]
        dataset[e2] = prices_two[e2]
dataset = dataset.rename({e2:'price_two'}, axis=1)
print(f"{e1} {e2}")
model_path = os.getcwd() + model_path
model_filename = model_path + model_name + '(' + e1+'_'+e2 + ').h5'
return dataset, model_filename
# process data: add features and add Y
def processData(addFeatures, addTarget, dataset):
# 1.2 add features to X
dataset = addFeatures(dataset)
# 1.3 add targets to Y
dataset = addTarget(dataset)
# 1.4 structure train and test data
dataset = dataset.drop(columns='price')
x_dataset, y_dataset = buildXY(dataset)
# 1.5 normalization
#x_dataset = normalise_windows(x_dataset)
return x_dataset, y_dataset
# process data: add features and add Y
def processData_two(addFeatures, addTarget, dataset, pastDays=30):
# 1.2 add features to X
dataset = addFeatures(dataset)
# 1.3 add targets to Y
dataset = addTarget(dataset)
# 1.4 structure train and test data
dataset = dataset.dropna()
dataset = dataset.drop(columns='price_one')
dataset = dataset.drop(columns='price_two')
#print(dataset.head())
#print(dataset.tail())
x_dataset, y_dataset = buildXY(dataset, pastDays)
# 1.5 normalization
#x_dataset = normalise_windows(x_dataset)
return x_dataset, y_dataset
# lstm var
from time import process_time
def forecast_var_from_lstm(addFeatures, prices, model_path="\\keras_model\\"):
"""
Prices is one asset's price data, in either DataFrame or Pandas Series
"""
# Initializing Data and Load Model
start_time = process_time()
dataset, model = load_keras_model(prices)
print(f"load data and model: {process_time()-start_time:0.4f}s")
start_time = process_time()
dataset = addFeatures(dataset)
x_dataset = dataset.drop(columns='price')
x_dataset = buildX(x_dataset)
print(f"process dataset: {process_time()-start_time:0.4f}s")
start_time = process_time()
f_var = np.append(np.zeros([len(prices)-len(x_dataset),1]), model.predict(np.array(x_dataset)))
print(f"calc var: {process_time()-start_time:0.4f}s")
return f_var
#lstm cov
def forecast_cov_from_lstm(addFeatures, prices_one, prices_two, pastDays=30, model_path="\\keras_model\\"):
"""
Prices is one asset's price data, in either DataFrame or Pandas Series
"""
# Initializing Data and Load Model
start_time = process_time()
dataset, model_filename = initData_two(prices_one, prices_two, model_path)
model = load_model(model_filename)
print(f"load data and model: {process_time()-start_time:0.4f}s")
start_time = process_time()
dataset = addFeatures(dataset)
dataset = dataset.drop(columns='price_one')
dataset = dataset.drop(columns='price_two')
x_dataset = buildX(dataset, pastDays)
print(f"process dataset: {process_time()-start_time:0.4f}s")
start_time = process_time()
    f_cov = np.append(np.zeros([len(prices_one)-len(x_dataset),1]), model.predict(np.array(x_dataset)))
    print(f"calc cov: {process_time()-start_time:0.4f}s")
    return f_cov
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
import os
import placentagen as pg
import csv
D0_list = [150,150,172.8,172.8,170.8,135.8,43,230,255.6,301.6,304.1,108.1,85.7,60.7,235.6,156.5,255.4,164.8,100.8,64.9]
Cpass_list = [1.168,1.168,1.416,1.416,1.386,1.067,0.316,1.697,1.417,1.672,1.857,0.843,0.655,0.371,1.802,1.043,1.719,1.141,0.687,0.459]
Cpassdash_list = [7.24,7.24,7.901,7.901,10.568,10.516,11.247,5.298,5.628,5.324,5.226,24.279,36.785,21.035,6.782,8.293,14.354,13.828,12.606,13.431]
Cact_list = [1.108, 1.103, 1.499, 1.858, 1.514, 1.202, 0.392, 3.995, 2.649, 1.395, 3.748, 1.665, 1.024, 0.654,
0.908, 3.491, 1.564, 1.36, 1.131, 0.405]
D0_list_act = [150,172.8,170.8,135.8,43,156.5,255.4,164.8,100.8,64.9]
Cmyo_list = [7.479,8.871,8.462,7.973,24.934,9.018,4.674,7.508,15.977,22.252]
expt_pressure = np.array([10.,30.,50.,70.,90.]) # defined in mmHg
passive_diameter_preg = np.array([76.258, 122.33566667, 145.152, 137.5625, 144.64166667])
passive_se_preg = np.array([10.8693589, 10.23274183, 13.36969036, 11.7338111, 12.88427201])
passive_diameter = np.array([54.11314286, 74.08128571, 88.831, 89.99828571, 86.769])
passive_se = np.array([3.71311161,5.78277879,9.940847,9.98130157,12.93325597])
active_diameter_preg = np.array([92.70733333,113.74933333,121.8715,107.93166667,101.19983333])
active_se_preg = np.array([8.36576993,6.12886374,15.68328409,15.01816237,19.29603708])
active_diameter = np.array([65.587,74.17528571,79.87185714,83.58714286,80.92285714])
active_se = np.array([5.52633482,5.86497481,7.06835057,7.71278033,9.02834107])
num_plot= 101
def main():
## Create a directory to output figures
export_directory = 'output'
if not os.path.exists(export_directory):
os.makedirs(export_directory)
passive_file = 'data/PassiveFits.csv'
active_file = 'data/ActiveFits.csv'
shear_file = 'data/FlowFits.csv'
file = open(passive_file)
passive_data = csv.reader(file)
header = next(passive_data)
rows = []
for row in passive_data:
rows.append(row)
file.close()
D0 = float(rows[0][0])
Cpass = float(rows[1][0])
Cpassdash = float(rows[2][0])
Cpass_preg = float(rows[4][0])
Cpassdash_preg = float(rows[5][0])
D0_preg = float(rows[3][0])
file = open(active_file)
active_data = csv.reader(file)
header = next(active_data)
rows = []
for row in active_data:
rows.append(row)
print(rows)
file.close()
Cact = float(rows[0][0])
Cactdash = float(rows[1][0])
Cactdashdash = float(rows[2][0])
Cmyo = float(rows[3][0])
Cdashdashtone = float(rows[4][0])
Cact_preg = float(rows[5][0])
Cactdash_preg = float(rows[6][0])
Cactdashdash_preg = float(rows[7][0])
Cmyo_preg = float(rows[8][0])
Cdashdashtone_preg = float(rows[9][0])
file = open(shear_file)
shear_data = csv.reader(file)
header = next(shear_data)
rows = []
for row in shear_data:
rows.append(row)
print(rows)
file.close()
Cshear = float(rows[0][0])
Cshear1 = float(rows[1][0])
shear_offset1 = float(rows[2][0])
shear_offset2 = float(rows[3][0])
Cshear_preg = float(rows[4][0])
Cshear1_preg = float(rows[5][0])
shear_offset1_preg = float(rows[6][0])
shear_offset2_preg = float(rows[7][0])
print("Non-pregnant D0 (um) ", D0)
print("Non-pregnant Cpass (N.m)", Cpass/1000.)
print("Non-pregnant Cpassdash (no units)",Cpassdash)
print("Non-pregnant Cact (N.m) ", Cact / 1000.)
print("Non-pregnant Cactdash (no units) ", Cactdash)
print("non-pregnant Cactdashdash (no units)", Cactdashdash)
print("non-pregnant Cmyo (m/N)", Cmyo * 1000.)
print("non-pregnant C'tone (no units)", Cdashdashtone)
print("non-pregnant Cshear (no units)", Cshear)
print("non-pregnant Cshear1 (no units)", Cshear1)
print("non-pregnant tau1 (no units)", shear_offset1)
print("non-pregnant tau2 (no units)", shear_offset2)
print("-------------------------------------")
print("pregnant D0 (um) ", D0_preg)
print("pregnant Cpass (N.m)", Cpass_preg/1000.)
print("pregnant Cpassdash (no units)",Cpassdash_preg)
print("pregnant Cact (N.m) ", Cact_preg / 1000.)
print("pregnant Cactdash (no units) ", Cactdash_preg)
print("pregnant Cactdashdash (no units)", Cactdashdash_preg)
print("pregnant Cmyo (m/N)", Cmyo_preg * 1000.)
print("pregnant C'tone (no units)", Cdashdashtone_preg)
print("pregnant Cshear (no units)", Cshear_preg)
print("pregnant Cshear1 (no units)", Cshear1_preg)
print("pregnant tau1 (no units)", shear_offset1_preg)
print("pregnant tau2 (no units)", shear_offset2_preg)
new_passive_d = np.zeros((num_plot, 1))
    new_passive_d_preg = np.zeros((num_plot, 1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 09:08:45 2020
@author: Maryam
"""
#import sys
#in blanca
#print(sys.path)
#sys.path.insert(0, '../SPLT_EEG/')
#sys.path.insert(0, "../SPLT_EEG/modules")
#sys.path.insert(0,"/pl/active/ccnlab/users/zolfaghar/finalCodes_version5.2/github/SPLT_EEG/modules")
#print(sys.path)
#print('------------------------')
import numpy as np
import argparse
import pickle
from sklearn.model_selection import StratifiedKFold, cross_val_score, StratifiedShuffleSplit, \
RepeatedStratifiedKFold
#in blanca
#import read_prep_epochs
#import apply_temp_gen
#print('done')
from modules.read_prep_epochs import read_prep_epochs
from modules.apply_temp_gen import apply_temp_gen
parser = argparse.ArgumentParser()
# Set Path
parser.add_argument("--SAVE_EPOCH_ROOT",
default='../data/preprocessed/epochs/aft_ICA_rej/',
help='Filename for saved preprocessed epochs')
parser.add_argument("--SAVE_RESULT_ROOT",
default='../results/temp_gen/',
help='Filename for saving the results')
# Conditions
parser.add_argument('--cond_filter', choices=['none','non_symm'],
default='none',
help='What type of filter should use')
parser.add_argument('--cond_block', choices=['early','later','rand', 'b3', 'b10', 'b34', 'b910', 'diff'],
default='early',
help='Earlier blocks vs later blocks')
parser.add_argument('--cond_time', choices=['prestim','poststim'],
default='prestim',
help='Period of analysis related to the onset(stim presentation)')
parser.add_argument('--cond_decoding',
choices=['none','removeevoked','resampled'],
default='none',
help='Period of analysis related to the onset(stim presentation)')
parser.add_argument('--mtdt_feat',
choices=['Trgt_Loc_main','Trgt_Loc_prev'],
default='Trgt_Loc_prev',
help='Metadata feature for group data according to)')
# EEG
parser.add_argument('--subj_num', type=int, default=1,
help='subject number')
parser.add_argument('--applyBaseline_bool', action='store_true',
help='apply baseline')
parser.add_argument('--pre_tmin', type=float, default=-0.4,
help='tmin crop for prestim period')
parser.add_argument('--pre_tmax', type=float, default=-0.05,
help='tmax crop for prestim period')
parser.add_argument('--post_tmin', type=float, default=0.05,
help='tmin crop for poststim period')
parser.add_argument('--post_tmax', type=float, default=0.45,
help='tmax crop for poststim period')
parser.add_argument('--occ_channels', action='store_true',
help='only choose channels in occipital areas')
parser.add_argument('--num_classes', type=int, default=2,
help='Number of classes to decode')
parser.add_argument('--normalization_type', choices=['normal','lstmPaper'],
default='normal',
help='Type of normalization')
# Permutation
parser.add_argument('--gen_rand_perm', action='store_true',
help='generate random permutation for each subject')
parser.add_argument('--null_max_iter', type=int, default=10000,
help='max num of iterations in generating null distribution')
parser.add_argument('--loop_null_iter', type=int, default=15,
help='max num of iterations in outer loop to go through sim')
# Decoder
parser.add_argument('--gen_decoder_scores', action='store_true',
help='generate decoder scores for each subject')
parser.add_argument('--n_splits', type=int, default=3,
help='How many folds to use for cross-validation')
parser.add_argument('--random_state', type=int, default=42,
help='random state in LinearSVC')
parser.add_argument('--max_iter', type=int, default=10000,
help='maximum num of iterations in LinearSVC')
parser.add_argument('--n_jobs', type=int, default=1,
help='Number of jobs to use for running the decoder')
parser.add_argument("--scoring",
default='roc_auc',
help='The scoring method using in decoder')
# Plot
parser.add_argument('--smooth_lvl', type=int, default=55,
help='smoothing level for savgol_filter')
"""
main function
"""
def main(args):
# [Grp1, Grp2, Grp3, Grp4, main_ptrn] = read_prep_epochs(args)
[Grp1, Grp2, Grp3, Grp4, Grps_dt, Grps_avg, smooth_evk, main_ptrn] = \
read_prep_epochs(args)
cv = StratifiedShuffleSplit(n_splits=args.n_splits, random_state=args.random_state)
fn_str_sbj='scores_timeGen_%sBlocks_%sFilter_PrePost_decod%s_bsline%s_%sk_%s_Subj_%s' \
%(args.cond_block, args.cond_filter, \
args.cond_decoding, args.applyBaseline_bool, \
args.n_splits, args.mtdt_feat, args.subj_num)
if args.cond_block=='rand':
sc_pck_G, sc_pck_fit_G = apply_temp_gen(args, Grp1, cv)
sc_G, sc_diag_G = sc_pck_G
sc_fit_G, sc_fit_diag_G = sc_pck_fit_G
sc_subj_pck = [sc_G, sc_diag_G, sc_fit_G, sc_fit_diag_G]
else:
sc_pck_G1, sc_pck_fit_G1 = apply_temp_gen(args, Grp1, cv)
sc_pck_G2, sc_pck_fit_G2 = apply_temp_gen(args, Grp2, cv)
sc_pck_G3, sc_pck_fit_G3 = apply_temp_gen(args, Grp3, cv)
sc_pck_G4, sc_pck_fit_G4 = apply_temp_gen(args, Grp4, cv)
# unpack them
sc_G1, sc_diag_G1 = sc_pck_G1
sc_G2, sc_diag_G2 = sc_pck_G2
sc_G3, sc_diag_G3 = sc_pck_G3
sc_G4, sc_diag_G4 = sc_pck_G4
sc_fit_G1, sc_fit_diag_G1 = sc_pck_fit_G1
sc_fit_G2, sc_fit_diag_G2 = sc_pck_fit_G2
sc_fit_G3, sc_fit_diag_G3 = sc_pck_fit_G3
sc_fit_G4, sc_fit_diag_G4 = sc_pck_fit_G4
        avg_sc = np.zeros([4, sc_G1.shape[0], sc_G1.shape[1]])
# Authors: <NAME> <<EMAIL>>
"""
Support recovery on simulated data (2D)
=======================================
This example shows the advantages of spatially relaxed inference when
dealing with high-dimensional spatial data. To do so, we compare several
statistical methods that aim at recovering the support, i.e., predictive
features. Among those methods some leverage the spatial structure of the
data. For more details about the inference algorithms presented in this
example or about the generative process used to simulate the data,
please refer to Chevalier et al. (2021) [1]_.
This example corresponds to the experiment described in detail in
Chevalier et al. (2021) [1]_. In short, to simulate the data, we draw
``n_samples`` i.i.d Gaussian vectors of size ``n_features`` and reshape them
into squares (edges are equal to ``n_features ** (1/2)``). Then, to introduce
some spatial structure, we apply a Gaussian filter that correlates features
that are nearby. The 2D data are then flattened into a design matrix ``X`` to
represent it as a regression setting and to ease the computation of the
simulated target ``y`` (see below). Then, we construct the weight map ``w``
which has the same shape as the 2D data, as it contains four predictive
regions in every corner of the square. Similarly as for the construction
of ``X``, the map ``w`` is finally flattened into a vector ``beta``. Lastly,
to derive the target ``y``, we draw a white Gaussian noise ``epsilon`` and
use a linear generative model: ``y = X beta + epsilon``.
The results of this experiment show that the methods that leverage the spatial
structure of the data are relevant. More precisely, we show that clustered
inference algorithms (e.g., CluDL) and ensembled clustered inference algorithms
(e.g., EnCluDL) are more powerful than the standard inference methods (see also
Chevalier et al. (2021) [1]_). Indeed, when the number of features is much
greater than the number of samples, standard statistical methods are
unlikely to recover the support. Then, the idea of clustered inference is to
compress the data without breaking the spatial structure, leading to a
compressed problem close to the original problem. This leads to a
powerful spatially relaxed inference. Indeed, thanks to the dimension reduction
the support recovery is feasible. However, due to the spatial compression,
there is a limited (and quantifiable) spatial uncertainty concerning the shape
of the estimated support. Finally, by considering several choices of
spatial compression, ensembled clustered inference algorithms reduce
significantly the spatial uncertainty compared to clustered inference
algorithms which consider only one spatial compression.
.. _References:
References
----------
.. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2021).
Spatially relaxed inference on high-dimensional linear models.
arXiv preprint arXiv:2106.02590.
"""
#############################################################################
# Imports needed for this script
# ------------------------------
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import FeatureAgglomeration
from hidimstat.scenario import multivariate_simulation
from hidimstat.stat_tools import zscore_from_pval, pval_from_cb
from hidimstat.desparsified_lasso import desparsified_lasso
from hidimstat.clustered_inference import clustered_inference
from hidimstat.ensemble_clustered_inference import ensemble_clustered_inference
#############################################################################
# Specific plotting functions
# ---------------------------
# The functions below are used to plot the results and illustrate the concept
# of spatial tolerance. If you are reading this example for the first time,
# you can skip this section.
#
# The following function builds a 2D map with four active regions that are
# enfolded by thin tolerance regions.
def weight_map_2D_extended(shape, roi_size, delta):
'''Build weight map with visible tolerance region'''
roi_size_extended = roi_size + delta
w = np.zeros(shape + (5,))
w[0:roi_size, 0:roi_size, 0] = 0.5
w[-roi_size:, -roi_size:, 1] = 0.5
w[0:roi_size, -roi_size:, 2] = 0.5
w[-roi_size:, 0:roi_size, 3] = 0.5
w[0:roi_size_extended, 0:roi_size_extended, 0] += 0.5
w[-roi_size_extended:, -roi_size_extended:, 1] += 0.5
w[0:roi_size_extended, -roi_size_extended:, 2] += 0.5
w[-roi_size_extended:, 0:roi_size_extended, 3] += 0.5
for i in range(roi_size_extended):
for j in range(roi_size_extended):
if (i - roi_size) + (j - roi_size) >= delta:
w[i, j, 0] = 0
w[-i-1, -j-1, 1] = 0
w[i, -j-1, 2] = 0
w[-i-1, j, 3] = 0
beta_extended = w.sum(-1).ravel()
return beta_extended
##############################################################################
# To generate a plot that exhibits the true support and the estimated
# supports for every method, we define the two following functions:
def add_one_subplot(ax, map, title):
'''Add one subplot into the summary plot'''
if map is not None:
im = ax.imshow(map)
im.set_clim(-1, 1)
ax.tick_params(
axis='both',
which='both',
bottom=False,
top=False,
left=False,
labelbottom=False,
labelleft=False)
ax.set_title(title)
else:
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
def plot(maps, titles, save_fig=False):
'''Make a summary plot from estimated supports'''
fig, axes = plt.subplots(3, 2, figsize=(4, 6))
for i in range(3):
for j in range(2):
k = i * 2 + j
add_one_subplot(axes[i][j], maps[k], titles[k])
fig.tight_layout()
if save_fig:
figname = 'figures/simu_2D.png'
plt.savefig(figname)
        print(f'Saved figure to {figname}')
plt.show()
##############################################################################
# Generating the data
# -------------------
#
# After setting the simulation parameters, we run the function that generates
# the 2D scenario that we have briefly described in the first section of this
# example.
# simulation parameters
n_samples = 100
shape = (40, 40)
n_features = shape[1] * shape[0]
roi_size = 4 # size of the edge of the four predictive regions
sigma = 2.0 # noise standard deviation
smooth_X = 1.0 # level of spatial smoothing introduced by the Gaussian filter
# generating the data
X_init, y, beta, epsilon, _, _ = \
multivariate_simulation(n_samples, shape, roi_size, sigma, smooth_X,
seed=1)
##############################################################################
# Choosing inference parameters
# -----------------------------
#
# The choice of the number of clusters depends on several parameters, such as:
# the structure of the data (a higher correlation between neighboring features
# enables a greater dimension reduction, i.e. a smaller number of clusters),
# the number of samples (small datasets require more dimension reduction) and
# the required spatial tolerance (small clusters lead to limited spatial
# uncertainty). Formally, "spatial tolerance" is defined by the largest
# distance from the true support for which the occurrence of a false discovery
# is not statistically controlled (c.f. :ref:`References`).
# Theoretically, the spatial tolerance ``delta`` is equal to the largest
# cluster diameter. However this choice is conservative, notably in the case
# of ensembled clustered inference. For these algorithms, we recommend taking
# the average cluster radius. In this example, we choose ``n_clusters = 200``,
# leading to a theoretical spatial tolerance ``delta = 6``. However, it
# turns out that ``delta = 2``, the average cluster radius, would have been
# sufficient for ensembled clustered inference algorithms (see Results).
# hyper-parameters
n_clusters = 200
# inference parameters
fwer_target = 0.1
delta = 6
# computation parameter
n_jobs = 1
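# As a quick sanity check on these values (illustrative, not part of the
# original example): with ``n_features = 1600`` and ``n_clusters = 200``, each
# cluster covers 1600 / 200 = 8 pixels on average, i.e. a typical diameter of
# about 3 pixels on the 40x40 grid. This is consistent with the theoretical
# ``delta = 6`` (largest cluster diameter) and the empirical ``delta = 2``
# (average cluster radius) quoted above.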
##############################################################################
# Computing z-score thresholds for support estimation
# ---------------------------------------------------
#
# Below, we translate the FWER target into z-score targets.
# To compute the z-score targets, we also account for the multiple
# testing correction. To do so, we consider the Bonferroni correction.
# For methods that do not reduce the feature space, the correction
# consists in dividing the FWER target by the number of features.
# For methods that group features into clusters, the correction
# consists in dividing by the number of clusters.
# computing the z-score thresholds for feature selection
correction_no_cluster = 1. / n_features
correction_cluster = 1. / n_clusters
thr_c = zscore_from_pval((fwer_target / 2) * correction_cluster)
thr_nc = zscore_from_pval((fwer_target / 2) * correction_no_cluster)
#############################################################################
# Inference with several algorithms
# ---------------------------------
#
# First, we compute a reference map that exhibits the true support and
# the theoretical tolerance region.
# compute true support with visible spatial tolerance
beta_extended = weight_map_2D_extended(shape, roi_size, delta)
#############################################################################
# Now, we compute the support estimated by a high-dimensional statistical
# inference method that does not leverage the data structure. This method
# was introduced by <NAME>. et al. (2014), <NAME>. et al. (2014)
# and <NAME>. et al. (2014) (full references are available at
# https://ja-che.github.io/hidimstat/) and is referred to as the
# Desparsified Lasso.
# compute desparsified lasso
beta_hat, cb_min, cb_max = desparsified_lasso(X_init, y, n_jobs=n_jobs)
pval, pval_corr, one_minus_pval, one_minus_pval_corr = \
pval_from_cb(cb_min, cb_max)
# compute estimated support (first method)
zscore = zscore_from_pval(pval, one_minus_pval)
selected_dl = zscore > thr_nc # use the "no clustering threshold"
# compute estimated support (second method)
selected_dl = np.logical_or(pval_corr < fwer_target / 2,
one_minus_pval_corr < fwer_target / 2)
#############################################################################
# Now, we compute the support estimated using a clustered inference algorithm
# (c.f. :ref:`References`) called Clustered Desparsified Lasso (CluDL) since it
# uses the Desparsified Lasso technique after clustering the data.
# Define the FeatureAgglomeration object that performs the clustering.
# This object is necessary to run the current algorithm and the following one.
connectivity = image.grid_to_graph(n_x=shape[0],
n_y=shape[1])
ward = FeatureAgglomeration(n_clusters=n_clusters,
connectivity=connectivity,
linkage='ward')
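# Optional sanity check (a sketch, commented out so that the example's
# behavior is unchanged): fitting the clustering once lets us inspect the
# cluster sizes that motivate the choice of ``delta`` above.
# ward.fit(X_init)
# print(np.bincount(ward.labels_).mean())  # average number of pixels per cluster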
# clustered desparsified lasso (CluDL)
beta_hat, pval, pval_corr, one_minus_pval, one_minus_pval_corr = \
clustered_inference(X_init, y, ward, n_clusters)
# compute estimated support (first method)
zscore = zscore_from_pval(pval, one_minus_pval)
selected_cdl = zscore > thr_c # use the "clustering threshold"
# compute estimated support (second method)
selected_cdl = np.logical_or(pval_corr < fwer_target / 2,
one_minus_pval_corr < fwer_target / 2)
#############################################################################
# Finally, we compute the support estimated by an ensembled clustered
# inference algorithm (c.f. :ref:`References`). This algorithm is called
# Ensemble of Clustered Desparsified Lasso (EnCluDL) since it runs several
# CluDL algorithms with different clustering choices. The different CluDL
# solutions are then aggregated into one.
# ensemble of clustered desparsified lasso (EnCluDL)
beta_hat, pval, pval_corr, one_minus_pval, one_minus_pval_corr = \
ensemble_clustered_inference(X_init, y, ward,
n_clusters, train_size=0.3)
# compute estimated support
selected_ecdl = np.logical_or(pval_corr < fwer_target / 2,
one_minus_pval_corr < fwer_target / 2)
#############################################################################
# Results
# -------
#
# Now we plot the true support, the theoretical tolerance regions and
# the estimated supports for every method.
maps = []
titles = []
maps.append(np.reshape(beta, shape))
titles.append('True weights')
maps.append(np.reshape(beta_extended, shape))
titles.append('True weights \nwith tolerance')
maps.append(np.reshape(selected_dl, shape))
titles.append('Desparsified Lasso')
maps.append(None)
titles.append(None)
maps.append(np.reshape(selected_cdl, shape))
#!/usr/bin/python
# coding: UTF-8
#
# Author: <NAME>
# Contact: <EMAIL>
#
# Feel free to contact for any information.
from __future__ import division, print_function
import logging
import numpy as np
import scipy.optimize as opt
from scipy.linalg import norm as matrix_norm
########################################
## Declaring Class
class Preprocessor(object):
logger = logging.getLogger(__name__)
_peak_types = ["triang", "norm", "lorentz"]
def __init__(self, max_osc=-1, nH=1, energy_ratio=0.1):
self.nH = nH
self.max_osc = max_osc
self.ptype = "norm"
self.energy_ratio = energy_ratio
self.f_min = 0
self.f_max = 1e10
self.theta_init = None
@classmethod
def _remove_peak(cls, t, s, ptype="norm"):
"""Fit and remove peak of a given type"""
if ptype=="norm":
def peak(t, *p):
_t = (t-p[0])/p[2]
return p[1]*np.exp(-_t*_t)
_wd = 0.5
_amp = np.max(s)
_pos = t[s==_amp][0]
elif ptype=="triang":
def peak(t, *p):
s = 1-np.abs((t-p[0])/p[2])
s[s<0] = 0
return p[1]*s
_wd = 1.0
_amp = np.max(s)
_pos = t[s==_amp][0]
elif ptype=="lorentz":
def peak(t, *p):
_t = (t-p[0])/(0.5*p[2])
return p[1]/(_t*_t + 1)
_wd = 0.2
_amp = np.max(s)
_pos = t[s==np.max(s)][0]
else:
raise ValueError("Incorect ptype value. Passed "+str(ptype))
init_guess = ([_pos, _amp, _wd])
bound_min = (max(_pos-2., t[0]), _amp/2, max(_wd-1., 0.01))
bound_max = (min(_pos+2., t[-1]), _amp*2, _wd+1.)
bounds = (bound_min, bound_max)
popt, _ = opt.curve_fit(peak, t, s, init_guess, bounds=bounds)
peakS = peak(t, *popt)
return peakS, popt
@classmethod
def remove_energy(cls, t, S, energy_ratio=0.1, max_peaks=-1, ptype="norm"):
"""Decrease input's energy by removing peaks.
Iteratively fits and removes peaks from provided signal.
Returns input without subtracted peaks and parameters of fitted
peaks, i.e. position, amplitude and width.
Use case for the method is to determine oscillation peaks in
provided Fourier spectrum.
"""
energy = matrix_norm(S)
_S = S.copy()
param = []
while(True):
_peakY, _param = cls._remove_peak(t, _S, ptype)
_S[:] = _S - _peakY
# Trim negative part after peak removal
_S[_S<0] = 0
param.append(_param)
new_energy = matrix_norm(_S)
current_ratio = new_energy/energy
cls.logger.debug("new_energy = {}, (r = {} )".format(new_energy, current_ratio))
# Break if energy ratio is reached
if current_ratio <= energy_ratio:
break
# Break if reached maximum number of peaks
if max_peaks > 0 and len(param) >= max_peaks:
break
return _S, np.array(param)
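    # Illustrative usage (a sketch, assuming ``freq`` and the spectrum
    # magnitude ``S`` are 1D arrays of equal length):
    # residual, peak_params = Preprocessor.remove_energy(freq, S, energy_ratio=0.1)
    # Each row of ``peak_params`` then holds the (position, amplitude, width)
    # of one fitted peak.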
def determine_params(self, t, S, energy_ratio=0.1, max_peaks=-1, ptype="norm"):
"""Determine oscillation parameters of time series.
        Extracts parameters of the most influential oscillations by converting
        the time series into a Fourier spectrum and identifying the most
        pronounced peaks. The number of identified oscillations depends on the
        energy ratio threshold. Oscillators are sorted in decreasing frequency
        order.
        Return
        ------
        param -- Parameters for identified oscillators in decreasing frequency order.
Numpy array in shape (osc x 4), where fields are:
params[:, 0] -- mean frequencies
params[:, 1] -- amplitudes
params[:, 2] -- error bars
params[:, 3] -- initial phases
"""
freq = np.fft.fftfreq(t.size, t[1]-t[0])
idx = np.r_[freq>=self.f_min] & np.r_[freq<self.f_max]
F = np.fft.fft(S)
fourierS, param = self.remove_energy(freq[idx], np.abs(F[idx]),
energy_ratio=energy_ratio,
max_peaks=max_peaks,
ptype=ptype)
param = param[param[:,0].argsort()[::-1]]
param = param.tolist()
for i, p in enumerate(param):
# Extracting phase
min_idx = np.argmin(np.abs(p[0]-freq))
param[i] = np.append(p, np.angle(F[min_idx]))
# Scaling amplitude
param[i][1] = param[i][1]/len(fourierS)
return np.array(param)
def compute_prior(self, t, S):
"""Computes estimates for KurSL prior parameters.
Return
------
theta -- Initial parameters in form of 2D Numpy array,
where columns are (W, Y0, R, K_).
Note that K_ matrix doesn't have (i,i) elements, as they are zero.
"""
self.param = self.determine_params(t, S,
energy_ratio=self.energy_ratio,
ptype=self.ptype,
max_peaks=self.max_osc)
self.logger.debug("Determined prior parameters: ")
self.logger.debug('\n'.join([str(p) for p in self.param]))
if np.any(self.param[:,:2]<0):
msg = "Something went weirdly wrong. Either frequency or amplitude " \
"was estimated to be negative. What's the sense behind that?\n" \
"Estimates:\n" + str(self.param)
raise AssertionError(msg)
# There's no point in analysing
if(self.param.shape[0]<2):
raise Exception("Single oscillator detected. No very interesting case.")
self.oscN = self.param.shape[0]
self.paramN = 3+self.nH*(self.oscN-1)
# Extract freq in decreasing order
        # WARNING! This is fine for now, because we're not estimating 'k'.
        # Otherwise: swap rows and columns of the K matrix at the same time.
W_sort_idx = np.argsort(self.param[:,0])[::-1]
        W = self.param[W_sort_idx, 0]*2*np.pi  # frequency -> angular frequency
R = self.param[W_sort_idx, 1]
Y0 = (self.param[W_sort_idx, -1]+2*np.pi)%(2*np.pi)
# Until better idea pops, just start with no coupling
K = np.zeros((self.oscN, self.nH*(self.oscN-1)))
## Reconstructing signal
self.theta_init = np.column_stack((W, Y0, R, K))
self.logger.debug('theta_init: ' + str(self.theta_init))
return self.theta_init
######################################
## MAIN PROGRAMME
if __name__ == "__main__":
import pylab as plt
import sys
from kursl import KurSL
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger(__file__)
###################################################
## Signal generation specific
# Min distance in generated frequency
W_MIN_DIFF = 7
# Type of peak that is fit
ptype = ['norm', 'triang', 'lorentz'][0]
# Time array
tMin, tMax, dt = 0, 5, 0.005
t = np.arange(tMin, tMax, dt)
# Number of oscillators
oscN = 6
nH = 2
###################################################
## Generating KurSL type signal
logger.debug("Generating parameters for KurSL")
W_MIN, W_MAX = 9, 100
Y_MIN, Y_MAX = 0, 2*np.pi
R_MIN, R_MAX = 0, 5
K_MIN, K_MAX = -5, 5
# Making sure that there's W_MIN_DIFF between all W
while True:
W = np.random.random(oscN)*W_MAX + W_MIN
if np.all(np.diff(W)>W_MIN_DIFF): break
R = np.random.random(oscN)*R_MAX + R_MIN
    Y0 = np.random.random(oscN)*Y_MAX + Y_MIN
"""
Template App - provides a template for new apps
"""
# general imports
import numpy as np
# bokeh imports
from bokeh.io import curdoc
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, Arrow, OpenHead, NormalHead
from bokeh.models.glyphs import ImageURL
from bokeh.models.widgets import Button, RadioButtonGroup, RadioGroup
from bokeh.layouts import column, row, Spacer
# internal imports
from TA_surface3d import TA_Surface3d
from TA_custom_class import TA_example_class
from TA_constants import (
slide_support_img, fixed_support_img, # support images
xsl, ysl, xsr, ysr, # support coordinates
support_width, support_height, # support scale
initial_value, start_value, end_value, # slider settings
button_width, # button settings
c_orange # colors used
)
from TA_Spring import TA_Spring
from TA_Mass import TA_CircularMass
from TA_Dashpot import TA_Dashpot
from TA_Coord import TA_Coord
# latex integration
from os.path import dirname, join, split, abspath
import sys, inspect
currentdir = dirname(abspath(inspect.getfile(inspect.currentframe())))
parentdir = join(dirname(currentdir), "shared/")
sys.path.insert(0,parentdir)
from latex_support import LatexDiv, LatexLabel, LatexLabelSet, LatexSlider, LatexLegend
# ----------------------------------------------------------------- #
###################################
# Constants #
###################################
# you may also define constants directly in main if they are only used here
# though defining them in an extra file is recommended for possible extensions
max_x = 5
###################################
# Global Variables #
###################################
# file-global variables (only "global" in this file!)
# see mutable objections in Python (e.g. lists and dictionaries)
global_vars = dict(callback_id=None)
###################################
# ColumnDataSources #
###################################
# define your ColumnDataSources here for a better overview of which data influences plots
# they don't have to be filled but at least defined (and later filled in callback or helper functions)
cds_support_left = ColumnDataSource(data=dict(sp_img=[fixed_support_img], x=[xsl] , y=[ysl]))
cds_support_right = ColumnDataSource(data=dict(sp_img=[slide_support_img], x=[xsr] , y=[ysr]))
cds_arrow = ColumnDataSource(data=dict(xS=[1], xE=[3], yS=[1], yE=[1]))
plot_3D_source = ColumnDataSource(data=dict(x=[], y=[], z=[]))
##################################
# Callback Functions #
##################################
def pp_button_cb_fun():
# define functionality
pass
def play_pause():
if play_pause_button.label == "Play":
global_vars["callback_id"] = curdoc().add_periodic_callback(pp_button_cb_fun,100)
play_pause_button.label = "Pause"
elif play_pause_button.label == "Pause":
curdoc().remove_periodic_callback(global_vars["callback_id"])
play_pause_button.label = "Play"
def slider_cb_fun(attr,old,new):
# define functionality
if(new == end_value):
some_helper_fun() # call helper function
def radio_cb_fun(attr,old,new):
if new==0: # slider without background color
example_slider.css_classes = ["slider"]
elif new==1: # change the background of the slider
example_slider.css_classes = ["slider", "bgcol"]
# NOTE: this just serves as an example
# Bokeh provides an easy python access via example_slider.background = "red"
# Use css_classes only in case if there is no attribute which provides your desired functionality!
# a more detailed example of hiding models can be found in the Dummy App
def radio_cb_fun_2(attr,old,new):
if new==0: # show slider
example_slider.visible = True
elif new==1: # hide slider
example_slider.visible = False
##################################
# Helper Functions #
##################################
# if a callback function might get too large, or if several callback functions
# partly do the same thing, outsource the shared logic to helper functions
def some_helper_fun():
print("hello, I'm here to help")
###################################
# Figures #
###################################
### define the figure ###
# the shown attributes should always be set
# if no tool is needed, set tools="" or toolbar_location=None
# for more attributes have a look at the bokeh documentation
figure_name = figure(title="Example Figure", x_range=(-1,max_x), y_range=(-0.5,2.5), height=300, width=400, tools="pan, wheel_zoom, reset")
figure_name.toolbar.logo = None # do not display the bokeh logo
### add the support images ###
# urls and coordinates are provided by a ColumnDataSource
# anchor specifies at which position of the image the x and y coordinates are referring to
# width and height could also be set using constants defined in TA_constants.py and imported here in main.py
figure_name.add_glyph(cds_support_left, ImageURL(url="sp_img", x='x', y='y', w=0.66, h=0.4, anchor="center"))
figure_name.add_glyph(cds_support_right, ImageURL(url="sp_img", x='x', y='y', w=support_width, h=support_height, anchor="center"))
### add arrows to the figure ###
# use either Normalheads or Openheads and orange color by default
#arrow_glyph = Arrow(end=NormalHead(line_color=c_orange, fill_color=c_orange), x_start='xS', y_start='yS', x_end='xE', y_end='yE', line_color=c_orange, source=cds_arrow)
arrow_glyph = Arrow(end=OpenHead(line_color=c_orange), x_start='xS', y_start='yS', x_end='xE', y_end='yE', line_color=c_orange, source=cds_arrow)
figure_name.add_layout(arrow_glyph)
### define the 3D plot ###
x = np.arange(0, 300, 10)
y = np.arange(0, 300, 10)
xx, yy = np.meshgrid(x, y)
xx = xx.ravel()
yy = yy.ravel()
value = np.sin(xx / 50) * np.cos(yy / 50)
import numpy as np
import scipy.sparse as sp
from fdfdpy.constants import ETA_0, EPSILON_0, DEFAULT_MATRIX_FORMAT
def sig_w(l, dw, m=4, lnR=-12):
# helper for S()
sig_max = -(m+1)*lnR/(2*ETA_0*dw)
return sig_max*(l/dw)**m
def S(l, dw, omega, L0):
# helper for create_sfactor()
return 1 - 1j*sig_w(l, dw)/(omega*EPSILON_0*L0)
def create_sfactor(wrange, L0, s, omega, Nw, Nw_pml):
# used to help construct the S matrices for the PML creation
sfactor_array = np.ones(Nw, dtype=np.complex128)
if Nw_pml < 1:
return sfactor_array
hw = np.diff(wrange)[0]/Nw
dw = Nw_pml*hw
for i in range(0, Nw):
        if s == 'f':
if i <= Nw_pml:
sfactor_array[i] = S(hw * (Nw_pml - i + 0.5), dw, omega, L0)
elif i > Nw - Nw_pml:
sfactor_array[i] = S(hw * (i - (Nw - Nw_pml) - 0.5), dw, omega, L0)
        if s == 'b':
if i <= Nw_pml:
sfactor_array[i] = S(hw * (Nw_pml - i + 1), dw, omega, L0)
elif i > Nw - Nw_pml:
sfactor_array[i] = S(hw * (i - (Nw - Nw_pml) - 1), dw, omega, L0)
return sfactor_array
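# Illustrative usage (a sketch): the forward and backward S-factors along x
# would be built as
# sfx_f = create_sfactor(xrange, L0, 'f', omega, N[0], Npml[0])
# sfx_b = create_sfactor(xrange, L0, 'b', omega, N[0], Npml[0])
# where ``N`` and ``Npml`` hold the grid size and PML thickness per axis.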
def S_create(omega, L0, N, Npml, xrange,
yrange=None, matrix_format=DEFAULT_MATRIX_FORMAT):
# creates S matrices for the PML creation
M = np.prod(N)
if np.isscalar(Npml):
        Npml = np.array([Npml])
import numpy as np
import os, sys
import time
# PART 1 - READING IN SNAPSHOTS AND WRITING POD COEFFICIENTS ----------------------------------
def read_in_snapshots_and_write_out_POD_coeffs(snapshot_data_location, snapshot_file_base, nTime, nDim, field_name, G, cumulative_tol):
# read in snapshots from vtu files ------------------------------------------------------------
print('reading in snapshots from vtu files')
    nNodes = get_nNodes_from_vtu(snapshot_data_location, snapshot_file_base)
snapshots_matrix = np.zeros((nDim*nNodes, nTime))
    velocity = np.zeros((nNodes, nDim))
import warnings
import numpy as np
import pandas as pd
import networkx as nx
import statsmodels.api as sm
def probability_to_odds(prob):
"""Converts given probability (proportion) to odds
Parameters
----------
prob : float, array
Probability or array of probabilities to convert to odds
"""
return prob / (1 - prob)
def odds_to_probability(odds):
"""Converts given odds to probability
Parameters
----------
odds : float, array
Odds or array of odds to convert to probabilities
"""
return odds / (1 + odds)
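# Worked example: probability_to_odds(0.25) returns 0.25 / 0.75 = 1/3, and
# odds_to_probability(1/3) recovers (1/3) / (1 + 1/3) = 0.25.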
def exp_map(graph, var):
"""Slow implementation of the exposure mapping functionality. Only supports the sum summary measure.
Still used by the dgm files.
Note
----
    Deprecated and no longer actively used by any functions.
Parameters
----------
graph : networkx.Graph
Network to calculate the summary measure for.
var : str
Variable in the graph to calculate the summary measure for
Returns
-------
array
One dimensional array of calculated summary measure
"""
# get adjacency matrix
matrix = nx.adjacency_matrix(graph, weight=None)
# get node attributes
y_vector = np.array(list(nx.get_node_attributes(graph, name=var).values()))
# multiply the weight matrix by node attributes
wy_matrix = np.nan_to_num(matrix * y_vector.reshape((matrix.shape[0]), 1)).flatten()
return np.asarray(wy_matrix).flatten() # I hate converting between arrays and matrices...
def fast_exp_map(matrix, y_vector, measure):
r"""Improved (computation-speed-wise) implementation of the exposure mapping functionality. Further supports a
variety of summary measures. This is accomplished by using the adjacency matrix and vectors to efficiently
calculate the summary measures (hence the function name). This is an improvement on previous iterations of this
function.
Available summary measures are
Sum (``'sum'``) :
.. math::
X_i^s = \sum_{j=1}^n X_j \mathcal{G}_{ij}
Mean (``'mean'``) :
.. math::
X_i^s = \sum_{j=1}^n X_j \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
Variance (``'var'``):
.. math::
\bar{X}_j = \sum_{j=1}^n X_j \mathcal{G}_{ij} \\
X_i^s = \sum_{j=1}^n (X_j - \bar{X}_j)^2 \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
Mean distance (``'mean_dist'``) :
.. math::
X_i^s = \sum_{j=1}^n (X_i - X_j) \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
Variance distance (``'var_dist'``) :
.. math::
\bar{X}_{ij} = \sum_{j=1}^n (X_i - X_j) \mathcal{G}_{ij} \\
X_i^s = \sum_{j=1}^n ((X_j - X_j) - \bar{X}_{ij})^2 \mathcal{G}_{ij} / \sum_{j=1}^n \mathcal{G}_{ij}
Note
----
If you would like other summary measures to be added or made available, please reach out via GitHub.
Parameters
----------
matrix : array
        Adjacency matrix. Should be extracted from a ``networkx.Graph`` via ``nx.adjacency_matrix(...)``
y_vector : array
Array of the variable to calculate the summary measure for. Should be in same order as ``matrix`` for
calculation to work as intended.
measure : str
Summary measure to calculate. Options are provided above.
Returns
-------
array
One dimensional array of calculated summary measure
"""
if measure.lower() == 'sum':
# multiply the weight matrix by node attributes
wy_matrix = np.nan_to_num(matrix * y_vector.reshape((matrix.shape[0]), 1)).flatten()
return np.asarray(wy_matrix).flatten() # converting between arrays and matrices...
elif measure.lower() == 'mean':
rowsum_vector = np.sum(matrix, axis=1) # calculate row-sum (denominator / degree)
with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
warnings.simplefilter('ignore', RuntimeWarning)
weight_matrix = matrix / rowsum_vector.reshape((matrix.shape[0]), 1) # calculate each nodes weight
wy_matrix = weight_matrix * y_vector.reshape((matrix.shape[0]), 1) # multiply matrix by node attributes
return np.asarray(wy_matrix).flatten() # converting between arrays and matrices...
elif measure.lower() == 'var':
a = matrix.toarray() # Convert matrix to array
a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's
with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
warnings.simplefilter('ignore', RuntimeWarning)
return np.nanvar(a * y_vector, axis=1)
elif measure.lower() == 'mean_dist':
a = matrix.toarray() # Convert matrix to array
a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's
c = (a * y_vector).transpose() - y_vector # Calculates the distance metric (needs transpose)
with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
warnings.simplefilter('ignore', RuntimeWarning)
return np.nanmean(c.transpose(), # back-transpose
axis=1)
elif measure.lower() == 'var_dist':
a = matrix.toarray() # Convert matrix to array
a = np.where(a == 0, np.nan, a) # filling non-edges with NaN's
c = (a * y_vector).transpose() - y_vector # Calculates the distance metric (needs transpose)
with warnings.catch_warnings(): # ignores NumPy's RuntimeWarning for isolated nodes (divide by 0)
warnings.simplefilter('ignore', RuntimeWarning)
return np.nanvar(c.transpose(), # back-transpose
axis=1)
else:
raise ValueError("The summary measure mapping" + str(measure) + "is not available")
def exp_map_individual(network, variable, max_degree):
"""Summary measure calculate for the non-parametric mapping approach described in Sofrygin & <NAME> (2017).
This approach works best for networks with uniform degree distributions. This summary measure generates a number
of columns (a total of ``max_degree``). Each column is then an indicator variable for each observation. To keep
all columns the same number of dimensions, zeroes are filled in for all degrees above unit i's observed degree.
Parameters
----------
network : networkx.Graph
The NetworkX graph object to calculate the summary measure for.
variable : str
Variable to calculate the summary measure for (this will always be the exposure variable internally).
max_degree : int
Maximum degree in the network (defines the number of columns to generate).
Returns
-------
dataframe
Data set containing all generated columns
"""
attrs = []
for i in network.nodes:
j_attrs = []
for j in network.neighbors(i):
j_attrs.append(network.nodes[j][variable])
attrs.append(j_attrs[:max_degree])
return pd.DataFrame(attrs,
columns=[variable+'_map'+str(x+1) for x in range(max_degree)])
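# Illustrative usage (a sketch): for a network ``G`` with exposure variable 'A'
# and maximum degree 3,
# df_map = exp_map_individual(G, variable='A', max_degree=3)
# returns columns 'A_map1', 'A_map2', 'A_map3', with one row per node.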
def network_to_df(graph):
"""Take input network and converts all node attributes to a pandas DataFrame object. This dataframe is then used
within ``NetworkTMLE`` internally.
Parameters
----------
graph : networkx.Graph
Graph with node attributes to transform into data set
Returns
-------
dataframe
Data set containing all node attributes
"""
return pd.DataFrame.from_dict(dict(graph.nodes(data=True)), orient='index')
def bounding(ipw, bound):
"""Internal function to bound or truncate the estimated inverse probablity weights.
Parameters
----------
ipw : array
Estimate inverse probability weights to truncate.
bound : list, float, int, set, array
Bounds to truncate weights by.
Returns
-------
array
Truncated inverse probability weights.
"""
if type(bound) is float or type(bound) is int: # Symmetric bounding
if bound > 1:
ipw = np.where(ipw > bound, bound, ipw)
ipw = np.where(ipw < 1 / bound, 1 / bound, ipw)
elif 0 < bound < 1:
ipw = np.where(ipw < bound, bound, ipw)
        ipw = np.where(ipw > 1 / bound, 1 / bound, ipw)
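    # The source record is truncated here; the ending below is a minimal
    # sketch, following the docstring above (the truncated weights are
    # returned).
    return ipw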
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
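# Illustrative usage (a sketch, using the dictionary built below):
# sg = space_groups['P -1']
# hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
# ``hkls`` then lists the reflections equivalent to (1, 2, 3), starting with
# (1, 2, 3) itself, and ``phases`` holds the corresponding phase factors.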
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
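# Centred settings such as 'C m m 2' above list every point-group operation
# twice: once with zero translation and once shifted by the centring vector
# (1/2, 1/2, 0), so the tuple count matches the full multiplicity of the
# general position.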
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
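# 'F d d 2' combines the three face centrings (0,1/2,1/2), (1/2,0,1/2) and
# (1/2,1/2,0) with diamond (d) glides, which is why quarter-cell translations
# (denominator 4) appear in the list above.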
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
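# From 'P m m m' onward the groups are centrosymmetric: the second half of
# each operation list is the first half composed with the inversion
# [-1,0,0,0,-1,0,0,0,-1], with the translation negated (negative trans_num
# entries are again equivalent modulo a lattice translation).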
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
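# The ':2' suffix (here and in 'P b a n :2' and 'P m m n :2' below) denotes
# the second origin choice of the International Tables, with the origin
# placed on a centre of inversion.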
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
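# The ':2' suffix marks origin choice 2 of the International Tables
# (origin placed on an inversion centre); the same convention is used
# below for 'F d d d :2', 'P 4/n :2', 'P 42/n :2' and 'I 41/a :2'.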
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
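# --- Note (illustrative) ---
# For centred settings such as 'I m m m' above, the generated list
# spells the centring out explicitly: the 8 point-group rotations each
# appear twice, once with translation (0,0,0) and once shifted by the
# body-centring vector (1/2,1/2,1/2), for 16 tuples in total. A quick
# check (a sketch, kept commented out so importing the module stays
# side-effect free):
#
#   rots = set(tuple(N.ravel(r)) for r, tn, td in transformations)
#   assert len(rots) == 8 and len(transformations) == 16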
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
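# With 'P 4' (75) the tetragonal system begins (space groups 75-142);
# the four-fold rotation about z, [0,-1,0, 1,0,0, 0,0,1], enters the
# operation lists from here on.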
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
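# --- Illustrative helper (not part of the generated data) ---
# Operations compose more conveniently as 4x4 augmented (Seitz)
# matrices [[R, t], [0, 1]]. A minimal sketch, assuming the
# (rot, trans_num, trans_den) layout used throughout this file and a
# numpy-compatible `N`:
def _seitz_matrix(rot, trans_num, trans_den):
    """Pack one symmetry operation into a 4x4 matrix."""
    m = N.zeros((4, 4), 'd')
    m[:3, :3] = rot                      # point-group rotation
    m[:3, 3] = trans_num*1./trans_den    # fractional translation
    m[3, 3] = 1.
    return m
# Two operations then compose by plain matrix multiplication, e.g.
#   op = N.dot(_seitz_matrix(*t1), _seitz_matrix(*t2))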
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
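# Illustration (added; not part of the generated table): each tuple
# (rot, trans_num, trans_den) encodes the symmetry operation
#     x' = rot @ x + trans_num / trans_den
# applied to fractional coordinates. A minimal sketch, assuming the
# SpaceGroup object exposes its operations as `transformations`:
#
#   rot, num, den = space_groups['I 4 2 2'].transformations[1]
#   x = N.array([0.1, 0.2, 0.3])
#   x_new = N.dot(rot, x) + num.astype(float) / den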
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
import numpy as np
from scipy.special import gamma, psi
from scipy import stats
from sklearn.neighbors import NearestNeighbors
from sklearn.base import BaseEstimator
from sklearn.utils import gen_batches
from .ensemble import Batch, BootStrap
from sklearn.utils import check_random_state, check_array
from typing import Optional, Union
import statsmodels.api as sm
import sys
sys.path.insert(0, "/home/emmanuel/code/rbig")
from rbig import RBIG
class Univariate:
def __init__(self):
pass
@staticmethod
def histogram_entropy(
X: np.ndarray, bins: Union[str, int] = "auto", correction: bool = True
) -> float:
"""Calculates the entropy using the histogram. Option to do a Miller Maddow
correction.
Parameters
----------
"""
# get histogram
hist_counts = np.histogram(X, bins=bins, range=(X.min(), X.max()))
# create random variable
hist_dist = stats.rv_histogram(hist_counts)
# calculate entropy
H = hist_dist.entropy()
# MLE Estimator with Miller-Maddow Correction
if correction:
H += 0.5 * (np.sum(hist_counts[0] > 0) - 1) / hist_counts[0].sum()
return H
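# Example usage (illustrative): for a standard Gaussian sample the estimate
# should approach the true entropy 0.5*log(2*pi*e) ~= 1.419 nats:
#
#   rng = np.random.RandomState(0)
#   H = Univariate.histogram_entropy(rng.randn(10000))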
@staticmethod
def knn_entropy(X: np.ndarray, k: int = 5, algorithm="brute", n_jobs=1):
"""Calculates the Entropy using the knn method.
Parameters
----------
X : np.ndarray, (n_samples x d_dimensions)
The data to find the nearest neighbors for.
k : int, default=5
The number of nearest neighbors to find.
algorithm : str, default='brute',
The knn algorithm to use.
('brute', 'ball_tree', 'kd_tree', 'auto')
n_jobs : int, default=-1
The number of cores to use to find the nearest neighbors
Returns
-------
H : float
Entropy calculated from kNN algorithm
"""
# initialize estimator
knn_clf = KNNEstimator(n_neighbors=k, algorithm=algorithm, n_jobs=n_jobs)
knn_clf.fit(X)
return knn_clf.score(X)
@staticmethod
def kde_entropy(
X: np.ndarray,
kernel="gau",
bw="normal_reference",
gridsize=50,
adjust=1,
cut=3,
clip=(-np.inf, np.inf),
):
# initialize KDE
kde_density = sm.nonparametric.KDEUnivariate(X)
kde_density.fit(bw=bw, gridsize=gridsize, adjust=adjust, cut=cut, clip=clip)
return kde_density.entropy
@staticmethod
def gaussian(X: np.ndarray) -> float:
loc = X.mean(axis=0)
scale = np.sqrt(np.cov(X.T))  # stats.norm expects the standard deviation, not the variance
# assume it's a Gaussian
norm_dist = stats.norm(loc=loc, scale=scale)
return norm_dist.entropy()
class Multivariate:
def __init__(self, seed=123):
self.seed = seed
@staticmethod
def knn_entropy(X: np.ndarray, k: int = 5, algorithm="brute", n_jobs=1):
"""Calculates the Entropy using the knn method.
Parameters
----------
X : np.ndarray, (n_samples x d_dimensions)
The data to find the nearest neighbors for.
k : int, default=5
The number of nearest neighbors to find.
algorithm : str, default='brute',
The knn algorithm to use.
('brute', 'ball_tree', 'kd_tree', 'auto')
n_jobs : int, default=-1
The number of cores to use to find the nearest neighbors
Returns
-------
H : float
Entropy calculated from kNN algorithm
"""
# initialize estimator
knn_clf = KNNEstimator(n_neighbors=k, algorithm=algorithm, n_jobs=n_jobs)
knn_clf.fit(X)
return knn_clf.score(X)
@staticmethod
def expF_entropy(X: np.ndarray) -> float:
n_dims = X.shape[1]
# source params, theta
theta_1 = X.mean(axis=0)
theta_2 = np.cov(X.T)
# natural params, eta
eta_1 = np.linalg.inv(theta_2) @ theta_1[:, None]
eta_2 = 0.5 * np.linalg.inv(theta_2)
# log-normalizer, F(eta)
eta_2_inv = np.linalg.inv(eta_2)  # renamed from eta_1_inv: it inverts eta_2
f_eta = (
0.25 * np.trace(eta_1.T @ eta_2_inv @ eta_1)
- 0.5 * np.linalg.slogdet(eta_2)[1]
+ (n_dims / 2.0) * np.log(np.pi)
)
# gradient log normalizer, dF(eta)
df_eta_1 = 0.5 * eta_2_inv @ eta_1
df_eta_2 = -0.5 * eta_2_inv - 0.25 * (eta_2_inv @ eta_1) @ (eta_2_inv @ eta_1).T
# outer product
H = f_eta - ((eta_1 * df_eta_1).sum() + (eta_2 * df_eta_2).sum())
return H
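# (Identity used above: for an exponential family with log-normalizer F(eta)
# and zero carrier measure, the differential entropy is
# H = F(eta) - <eta, grad F(eta)>, evaluated here for the multivariate Gaussian.)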
@staticmethod
def gaussian(X: np.ndarray) -> float:
mean = X.mean(axis=0)
cov = np.cov(X.T)
# assume it's a Gaussian
norm_dist = stats.multivariate_normal(mean=mean, cov=cov)
return norm_dist.entropy()
class KNNEstimator(BaseEstimator, Batch):
"""Performs the KNN search to
Parameters
----------
n_neighbors : int, default = 10
The kth neigbour to use for distance
algorithm : str, default='auto'
The algorithm to use for the knn search.
['auto', 'brute', 'kd_tree', 'ball_tree']
* Auto - automatically found
* brute - brute-force search
* kd_tree - KDTree, fast for generalized N-point problems
* ball_tree - BallTree, fast for generalized N-point problems
KDTree has a faster query time but longer build time.
BallTree has a faster build time but longer query time.
n_jobs : int, default=-1
Number of cores to use for nn search
ensemble : bool, default=False
Whether to use an ensemble of estimators via batches
batch_size : int, default=100
If ensemble=True, this determines the number of batches
of data to use to estimate the entropy
kwargs : any extra kwargs to use. Please see
sklearn.neighbors.NearestNeighbors function.
min_dist : float, default=0.0
Ensures that all distances are at least 0.0.
Attributes
----------
H_x : float,
The estimated entropy of the data.
"""
def __init__(
self,
n_neighbors: int = 10,
algorithm: str = "auto",
n_jobs: int = -1,
ensemble=False,
batch_size=100,
kwargs: Optional[dict] = None,
) -> None:
self.n_neighbors = n_neighbors
self.algorithm = algorithm
self.n_jobs = n_jobs
self.ensemble = ensemble
self.kwargs = kwargs
self.batch_size = batch_size
def fit(self, X: np.ndarray, y: Optional[np.ndarray] = None) -> BaseEstimator:
"""
Parameters
----------
X : np.ndarray, (n_samples, n_features)
Data to be estimated.
"""
if self.ensemble:
self.H_x = self._fit_batches(X, self.batch_size)
else:
self.H_x = self._fit(X)
return self
def _fit(self, X: np.ndarray) -> float:
n_samples, d_dimensions = X.shape
# volume of unit ball in d^n
vol = (np.pi ** (0.5 * d_dimensions)) / gamma(0.5 * d_dimensions + 1)
# 1. Calculate the K-nearest neighbors
distances = knn_distance(
X,
n_neighbors=self.n_neighbors + 1,
algorithm=self.algorithm,
n_jobs=self.n_jobs,
kwargs=self.kwargs,
)
# return distance to kth nearest neighbor
distances = distances[:, -1]
# add error margin to avoid zeros
distances += np.finfo(X.dtype).eps
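# (The source file truncates here; the standard Kozachenko-Leonenko
# completion would read as follows -- an assumption, not the original code:)
#
#   return (
#       d_dimensions * np.mean(np.log(distances))
#       + np.log(vol)
#       + psi(n_samples)
#       - psi(self.n_neighbors)
#   )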
import math
import cv2
import numpy as np
from PyQt5.QtCore import QRect, QPoint
from PyQt5.QtGui import QPainter, QPen, QColor, QBrush
from stytra.utilities import HasPyQtGraphParams
class CalibrationException(Exception):
""" """
pass
class Calibrator(HasPyQtGraphParams):
""" """
def __init__(self, mm_px=1):
super().__init__()
self.enabled = False
self.params.setName("stimulus_calibration_params")
self.params.addChildren(
[
{"name": "mm_px", "value": mm_px, "visible": True},
{
"name": "length_mm",
"value": None,
"type": "float",
"suffix": "mm",
"siPrefix": True,
"limits": (1, 200),
"visible": True,
},
{"name": "length_px", "value": None, "visible": True},
{"name": "cam_to_proj", "value": None, "visible": False},
{"name": "proj_to_cam", "value": None, "visible": False},
]
)
self.length_to_measure = "do not use the base class as a calibrator"
self.params["length_mm"] = 1
self.params.child("length_mm").sigValueChanged.connect(self.set_physical_scale)
def toggle(self):
""" """
self.enabled = not self.enabled
def set_physical_scale(self):
"""Calculate mm/px from calibrator length"""
self.params["mm_px"] = self.params["length_mm"] / self.params["length_px"]
def set_pixel_scale(self, w, h):
""""Set pixel size, need to be called by the projector widget on resizes"""
self.params["length_px"] = w
def make_calibration_pattern(self, p, h, w):
"""
Parameters
----------
p :
h :
w :
Returns
-------
"""
pass
class CrossCalibrator(Calibrator):
""" """
def __init__(self, *args, fixed_length=60, calibration_length="outside", **kwargs):
super().__init__(*args, **kwargs)
self.params["length_px"] = 1
self.length_is_fixed = False
if calibration_length == "outside":
self.outside = True
self.length_to_measure = "height of the rectangle (mm)"
else:
self.outside = False
self.length_to_measure = "a line of the cross" #TODO: world this better, unclear
if fixed_length is not None:
self.params["length_px"] = fixed_length
self.length_is_fixed = True
def make_calibration_pattern(self, p, h, w):
"""
Parameters
----------
p :
h :
w :
Returns
-------
"""
p.setPen(QPen(QColor(255, 0, 0)))
p.setBrush(QBrush(QColor(0, 0, 0)))
p.drawRect(QRect(1, 1, w - 2, h - 2))
l2 = self.params["length_px"] / 2
p.drawLine(w // 2 - l2, h // 2, w // 2 + l2, h // 2)
p.drawLine(w // 2, h // 2 + l2, w // 2, h // 2 - l2)
p.drawLine(w // 2, h // 2 + l2, w // 2 + l2, h // 2 + l2)
def set_pixel_scale(self, w, h):
""""Set pixel size, need to be called by the projector widget on resizes"""
if not self.length_is_fixed:
if self.outside:
self.params["length_px"] = h
else:
self.params["length_px"] = max(h / 2, w / 2)
class CircleCalibrator(Calibrator):
"""" Class for a calibration pattern which displays 3 dots in a 30 60 90 triangle"""
def __init__(self, *args, dh=80, r=3, **kwargs):
super().__init__(*args, **kwargs)
self.dh = dh
self.r = r
self.params["length_px"] = dh
self.points = None
self.points_cam = None
self.length_to_measure = "the largest distance between the points"
def make_calibration_pattern(self, p, h, w, draw=True):
"""
Parameters
----------
p :
h :
w :
draw :
(Default value = True)
Returns
-------
"""
assert isinstance(p, QPainter)
d2h = self.dh // 2
d2w = int(self.dh * math.sqrt(3)) // 2
ch = h // 2
cw = w // 2
# the three points sorted in ascending angle order (30, 60, 90)
centres = [(cw - d2h, ch + d2w), (cw + d2h, ch + d2w), (cw - d2h, ch - d2w)]
centres = np.array(centres)
self.points = centres[np.argsort(CircleCalibrator._find_angles(centres)), :]
if draw:
p.setPen(QPen(QColor(255, 0, 0)))
p.setBrush(QBrush(QColor(255, 0, 0)))
for centre in centres:
p.drawEllipse(QPoint(*centre), self.r, self.r)
@staticmethod
def _find_angles(kps):
"""
Parameters
----------
kps :
Returns
-------
"""
angles = np.empty(3)
for i, pt in enumerate(kps):
pt_prev = kps[(i - 1) % 3]
pt_next = kps[(i + 1) % 3]
# angles are calculated from the dot product
angles[i] = np.abs(
np.arccos(
np.sum((pt_prev - pt) * (pt_next - pt))
/ np.product(
[np.sqrt(np.sum((pt2 - pt) ** 2)) for pt2 in (pt_prev, pt_next)]
)
)
)
return angles
"""Lekhnitskii solutions to homogeneous anisotropic plates with loaded and unloaded holes
Notes
-----
This module uses the following acronyms
* CLPT: Classical Laminated Plate Theory
References
----------
.. [1] <NAME>. (2007). *Stress distribution and strength prediction of composite
laminates with multiple holes* (PhD thesis). Retrieved from
https://rc.library.uta.edu/uta-ir/bitstream/handle/10106/767/umi-uta-1969.pdf?sequence=1&isAllowed=y
.. [2] <NAME>., <NAME>., & <NAME>. (1987). *Anisotropic plates* (2nd ed.).
New York: Gordon and Breach science.
.. [3] <NAME>. and <NAME>. (1981) *Effect of variances and manufacturing
tolerances on the design strength and life of mechanically fastened
composite joints* (Vol. 1,2,3). AFWAL-TR-81-3041.
.. [4] <NAME>. and <NAME>. (1973) *A synthesis procedure for mechanically
fastened joints in advanced composite materials* (Vol. II). AFML-TR-73-145.
"""
import logging
import abc
from collections.abc import Callable
from typing import Any
import numpy as np
import numpy.testing as nptest
from nptyping import NDArray
logger = logging.getLogger(__name__)
def rotate_stress(stresses: NDArray[3, np.float], angle: float = 0.) -> NDArray[3, np.float]:
r"""Rotates 2D stress components by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Parameters
----------
stresses : ndarray
array of [:math: `\sigma_x, \sigma_y, \tau_{xy}`] in-plane stresses
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
ndarray
2D array of [:math: `\sigma_x', \sigma_y', \tau_{xy}'`] rotated stresses
"""
c = np.cos(angle)
s = np.sin(angle)
rotation_matrix = np.array([
[c**2, s**2, 2*s*c],
[s**2, c**2, -2*s*c],
[-s*c, s*c, c**2-s**2]
])
stresses = rotation_matrix @ stresses.T
return stresses.T
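# Example (illustrative): rotating a pure uniaxial stress by 90 degrees moves
# sigma_x into sigma_y:
#
#   s = np.array([[100., 0., 0.]])
#   rotate_stress(s, angle=np.pi/2)   # -> approximately [[0., 100., 0.]]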
def rotate_strain(strains: NDArray[3, np.float], angle: float = 0.) -> NDArray[3, float]:
r"""Rotates 2D strain components by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Parameters
----------
strains : ndarray
2D nx3 array of [:math: `\epsilon_x, \epsilon_y, \epsilon_{xy}`] in-plane strains
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
ndarray
2D nx3 array of [:math: `\epsilon_x', \epsilon_y', \epsilon_{xy}'`] rotated stresses
"""
c = np.cos(angle)
s = np.sin(angle)
rotation_matrix = np.array([
[c**2, s**2, s*c],
[s**2, c**2, -s*c],
[-2*s*c, 2*s*c, c**2 - s**2]
])
strains = rotation_matrix @ strains.T
return strains.T
def rotate_material_matrix(a_inv: NDArray[(3, 3), np.float], angle: float = 0.) -> NDArray[(3, 3), float]:
r"""Rotates the material compliance matrix by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Notes
-----
This function implements Eq. 9.6 [1]_
Parameters
----------
a_inv : ndarray
2D (3, 3) inverse CLPT A-matrix
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
ndarray
2D (3, 3) rotated compliance matrix
"""
c = np.cos(angle)
s = np.sin(angle)
a11 = a_inv[0, 0]
a12 = a_inv[0, 1]
a16 = a_inv[0, 2]
a22 = a_inv[1, 1]
a26 = a_inv[1, 2]
a66 = a_inv[2, 2]
a11p = a11*c**4 + (2*a12 + a66)*s**2*c**2 + a22*s**4 + (a16*c**2 + a26*s**2)*np.sin(2*angle)
a22p = a11*s**4 + (2*a12 + a66)*s**2*c**2 + a22*c**4 - (a16*s**2 + a26*c**2)*np.sin(2*angle)
a12p = a12 + (a11 + a22 - 2*a12 - a66)*s**2*c**2 + 0.5*(a26 - a16)*np.sin(2*angle)*np.cos(2*angle)
a66p = a66 + 4*(a11 + a22 - 2*a12 - a66)*s**2*c**2 + 2*(a26 - a16)*np.sin(2*angle)*np.cos(2*angle)
a16p = ((a22*s**2 - a11*c**2 + 0.5*(2*a12 + a66)*np.cos(2*angle))*np.sin(2*angle)
+ a16*c**2*(c**2 - 3*s**2) + a26*s**2*(3*c**2 - s**2))
a26p = ((a22*c**2 - a11*s**2 - 0.5*(2*a12 + a66)*np.cos(2*angle))*np.sin(2*angle)
+ a16*s**2*(3*c**2 - s**2) + a26*c**2*(c**2 - 3*s**2))
# test invariants (Eq. 9.7 [2]_)
nptest.assert_almost_equal(a11p + a22p + 2*a12p, a11 + a22 + 2*a12, decimal=4)
nptest.assert_almost_equal(a66p - 4*a12p, a66 - 4*a12, decimal=4)
return np.array([[a11p, a12p, a16p], [a12p, a22p, a26p], [a16p, a26p, a66p]])
def rotate_complex_parameters(mu1: complex, mu2: complex, angle: float = 0.) -> tuple[complex, complex]:
r"""Rotates the complex parameters by given angle
The rotation angle is positive counter-clockwise from the positive x-axis in the cartesian xy-plane.
Notes
-----
Implements Eq. 10.8 [2]_
Parameters
----------
mu1 : complex
first complex parameter
mu2 : complex
second complex parameter
angle : float, default 0.
angle measured counter-clockwise from positive x-axis (radians)
Returns
-------
mu1p, mu2p : complex
first and second transformed complex parameters
"""
c = np.cos(angle)
s = np.sin(angle)
mu1p = (mu1*c - s)/(c + mu1*s)
mu2p = (mu2*c - s)/(c + mu2*s)
return mu1p, mu2p
class Hole(abc.ABC):
"""Abstract parent class for defining a hole in an anisotropic infinite plate
This class defines shared methods and attributes for anisotropic elasticity solutions of plates with circular
holes.
This is an abstract class, do not instantiate this class.
Notes
-----
The following assumptions apply for plates in a state of generalized plane stress.
#. The plates are homogeneous and a plane of elastic symmetry which is parallel to their middle plane
exists at every point.
#. Applied forces act within planes that are parallel and symmetric to the middle plane of the plates,
and have negligible variation through the thickness.
#. Plate deformations are small.
Parameters
----------
diameter : float
hole diameter
thickness : float
laminate thickness
a_inv : array_like
2D (3, 3) inverse of CLPT A-matrix
Attributes
----------
r : float
the hole radius
a : ndarray
(3, 3) inverse a-matrix of the laminate
h : float
thickness of the laminate
mu1 : complex
first root of the characteristic equation
mu2 : complex
second root of the characteristic equation
mu1_bar : complex
conjugate of the first root of the characteristic equation
mu2_bar : complex
conjugate of the second root of the characteristic equation
"""
MAPPING_PRECISION = 0.0000001
def __init__(self, diameter: float, thickness: float, a_inv: NDArray[(3, 3), float]) -> None:
self.r = diameter/2.
self.a = np.array(a_inv, dtype=float)
self.h = thickness
self.mu1, self.mu2, self.mu1_bar, self.mu2_bar = self.roots()
def roots(self) -> tuple[complex, complex, complex, complex]:
r""" Finds the roots to the characteristic equation
Notes
-----
This method implements Eq. A.2 [1]_ or Eq. 7.4 [2]_
.. math:: a_{11}\mu^4-2a_{16}\mu^3+(2a_{12}+a_{66})\mu^2-2a_{26}\mu+a_{22}=0
Raises
------
ValueError
If roots cannot be found
"""
a11 = self.a[0, 0]
a12 = self.a[0, 1]
a16 = self.a[0, 2]
a22 = self.a[1, 1]
a26 = self.a[1, 2]
a66 = self.a[2, 2]
roots = np.roots([a11, -2 * a16, (2 * a12 + a66), -2 * a26, a22])
if np.imag(roots[0]) >= 0.0:
mu2 = roots[0]
mu2_bar = roots[1]
elif np.imag(roots[1]) >= 0.0:
mu2 = roots[1]
mu2_bar = roots[0]
else:
raise ValueError("mu1 cannot be solved")
if np.imag(roots[2]) >= 0.0:
mu1 = roots[2]
mu1_bar = roots[3]
elif np.imag(roots[3]) >= 0.0:
mu1 = roots[3]
mu1_bar = roots[2]
else:
raise ValueError("mu2 cannot be solved")
return mu1, mu2, mu1_bar, mu2_bar
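# Note (added): for quasi-isotropic laminates the characteristic equation
# degenerates to (mu^2 + 1)^2 = 0, giving the repeated root mu1 = mu2 = 1j;
# Lekhnitskii-type solutions are commonly regularized by perturbing one root
# slightly in that case.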
def xi_1(self, z1s: NDArray[Any, complex]) -> tuple[NDArray[Any, complex], NDArray[Any, int]]:
r"""Calculates the first mapping parameters
Notes
-----
This method implements Eq. A.4 & Eq. A.5, [1]_ or Eq. 37.4 [2]_
.. math:: \xi_1=\frac{z_1\pm\sqrt{z_1^2-a^2-\mu_1^2b^2}}{a-i\mu_1b}
Parameters
----------
z1s : ndarray
1D array of first parameters from the complex plane :math: `z_1=x+\mu_1y`
Returns
-------
xi_1s : ndarray
1D array of the first mapping parameters
sign_1s : ndarray
1D array of signs producing positive mapping parameters
"""
mu1 = self.mu1
a = self.r
b = self.r
xi_1s = np.zeros(len(z1s), dtype=complex)
sign_1s = np.zeros(len(z1s), dtype=int)
xi_1_pos = (z1s + np.sqrt(z1s * z1s - a * a - mu1 * mu1 * b * b)) / (a - 1j * mu1 * b)
xi_1_neg = (z1s - np.sqrt(z1s * z1s - a * a - mu1 * mu1 * b * b)) / (a - 1j * mu1 * b)
pos_indices = np.where(np.abs(xi_1_pos) >= (1. - self.MAPPING_PRECISION))[0]
neg_indices = np.where(np.abs(xi_1_neg) >= (1. - self.MAPPING_PRECISION))[0]
xi_1s[pos_indices] = xi_1_pos[pos_indices]
xi_1s[neg_indices] = xi_1_neg[neg_indices]
# high level check that all indices were mapped
if not (pos_indices.size + neg_indices.size) == xi_1s.size:
bad_indices = np.where(xi_1s == 0)[0]
logger.warning(f"xi_1 unsolvable\n Failed Indices: {bad_indices}")
sign_1s[pos_indices] = 1
sign_1s[neg_indices] = -1
return xi_1s, sign_1s
def xi_2(self, z2s: NDArray[Any, complex]) -> tuple[NDArray[Any, complex], NDArray[Any, int]]:
r""" Calculates the first mapping parameters
Notes
-----
This method implements Eq. A.4 & Eq. A.5, [1]_ or Eq. 37.4 [2]_
.. math:: \xi_2=\frac{z_2\pm\sqrt{z_2^2-a^2-\mu_2^2b^2}}{a-i\mu_2b}
Parameters
----------
z2s : ndarray
1D array of second parameters from the complex plane :math: `z_2=x+\mu_2y`
Returns
-------
xi_2s : ndarray
1D array of the second mapping parameters
sign_2s : ndarray
1D array of signs producing positive mapping parameters
"""
mu2 = self.mu2
a = self.r
b = self.r
xi_2s = np.zeros(len(z2s), dtype=complex)
sign_2s = np.zeros(len(z2s), dtype=int)
xi_2_pos = (z2s + np.sqrt(z2s * z2s - a * a - mu2 * mu2 * b * b)) / (a - 1j * mu2 * b)
xi_2_neg = (z2s - np.sqrt(z2s * z2s - a * a - mu2 * mu2 * b * b)) / (a - 1j * mu2 * b)
pos_indices = np.where(np.abs(xi_2_pos) >= (1. - self.MAPPING_PRECISION))[0]
neg_indices = np.where(np.abs(xi_2_neg) >= (1. - self.MAPPING_PRECISION))[0]
xi_2s[pos_indices] = xi_2_pos[pos_indices]
xi_2s[neg_indices] = xi_2_neg[neg_indices]
# high level check that all indices were mapped
if not (pos_indices.size + neg_indices.size) == xi_2s.size:
bad_indices = np.where(xi_2s == 0)[0]
logger.warning(f"xi_2 unsolvable\n Failed Indices: {bad_indices}")
sign_2s[pos_indices] = 1
sign_2s[neg_indices] = -1
return xi_2s, sign_2s
@abc.abstractmethod
def phi_1(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
raise NotImplementedError("You must implement this function.")
@abc.abstractmethod
def phi_2(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
raise NotImplementedError("You must implement this function.")
@abc.abstractmethod
def phi_1_prime(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
raise NotImplementedError("You must implement this function.")
@abc.abstractmethod
def phi_2_prime(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
raise NotImplementedError("You must implement this function.")
def stress(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 3), float]:
r""" Calculates the stress at (x, y) points in the plate
Notes
-----
This method implements Eq. 8.2 [2]_
.. math:: \sigma_x=2Re[\mu_1^2\Phi_1'(z_1)+\mu_2^2\Phi_2'(z_2)]
.. math:: \sigma_y=2Re[\Phi_1'(z_1)+\Phi_2'(z_2)]
.. math:: \tau_{xy}=-2Re[\mu_1\Phi_1'(z_1)+\mu_2\Phi_2'(z_2)]
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[sx0, sy0, sxy0], [sx1, sy1, sxy1], ... , [sxn, syn, sxyn]]
(n, 3) in-plane stress components in the cartesian coordinate system
"""
mu1 = self.mu1
mu2 = self.mu2
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z1 = x + mu1 * y
z2 = x + mu2 * y
phi_1_prime = self.phi_1_prime(z1)
phi_2_prime = self.phi_2_prime(z2)
sx = 2.0 * np.real(mu1 * mu1 * phi_1_prime + mu2 * mu2 * phi_2_prime)
sy = 2.0 * np.real(phi_1_prime + phi_2_prime)
sxy = -2.0 * np.real(mu1 * phi_1_prime + mu2 * phi_2_prime)
return np.array([sx, sy, sxy]).T
def displacement(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 2), float]:
r""" Calculates the displacement at (x, y) points in the plate
Notes
-----
This method implements Eq. 8.3 [2]_
.. math:: u=2Re[p_1\Phi_1(z_1)+p_2\Phi_2(z_2)]
.. math:: v=2Re[q_1\Phi_1(z_1)+q_2\Phi_2(z_2)]
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[u0, v0], [u1, v1], ... , [un, vn]]
(n, 2) in-plane displacement components in the cartesian coordinate system
"""
a11 = self.a[0, 0]
a12 = self.a[0, 1]
a16 = self.a[0, 2]
a22 = self.a[1, 1]
a26 = self.a[1, 2]
mu1 = self.mu1
mu2 = self.mu2
p1 = a11*mu1**2 + a12 - a16*mu1
p2 = a11*mu2**2 + a12 - a16*mu2
q1 = a12*mu1 + a22/mu1 - a26
q2 = a12*mu2 + a22/mu2 - a26
x = np.array(x, dtype=float)
y = np.array(y, dtype=float)
z1 = x + mu1 * y
z2 = x + mu2 * y
phi_1 = self.phi_1(z1)
phi_2 = self.phi_2(z2)
u = 2.0 * np.real(p1 * phi_1 + p2 * phi_2)
v = 2.0 * np.real(q1 * phi_1 + q2 * phi_2)
return np.array([u, v]).T
class UnloadedHole(Hole):
r"""Class for defining an unloaded hole in an infinite anisotropic homogeneous plate
This class represents an infinite anisotropic plate with an unfilled circular hole loaded at infinity with
forces in the x, y and xy (shear) directions.
Parameters
----------
loads: array_like
1D array [Nx, Ny, Nxy] force / unit length
diameter: float
hole diameter
thickness: float
laminate thickness
a_inv: array_like
2D array (3, 3) inverse CLPT A-matrix
Attributes
----------
applied_stress : (1, 3) ndarray
[:math:`\sigma_x^*, \sigma_y^*, \tau_{xy}^*`] stresses applied at infinity
"""
def __init__(self, loads: NDArray[3, float], diameter: float, thickness: float,
a_inv: NDArray[(3, 3), float]) -> None:
super().__init__(diameter, thickness, a_inv)
self.applied_stress = np.array(loads, dtype=float) / self.h
def alpha(self) -> complex:
r"""Calculates the alpha loading term for three components of applied stress at infinity
Three components of stress are [:math:`\sigma_{x}^*, \sigma_{y}^*, \tau_{xy}^*`]
Notes
-----
This method implements Eq. A.7 [1]_ which is a combination of Eq. 38.12 & Eq. 38.18 [2]_
.. math:: \alpha_1=\frac{r}{2}(\tau_{xy}^*i-\sigma_{y}^*)
Returns
-------
complex
first fourier series term for applied stress at infinity
"""
sy = self.applied_stress[1]
sxy = self.applied_stress[2]
r = self.r
return 1j * sxy * r / 2 - sy * r / 2
def beta(self) -> complex:
r"""Calculates the beta loading term for three components of applied stress at infinity
Three components of stress are [:math:`\sigma_x^*, \sigma_y^*, \tau_{xy}^*`]
Notes
-----
This method implements Eq. A.7 [1]_ which is a combination of Eq. 38.12 & Eq. 38.18 [2]_
.. math:: \beta_1=\frac{r}{2}(\tau_{xy}^*-\sigma_x^*i)
Returns
-------
complex
first fourier series term for applied stresses at infinity
"""
sx = self.applied_stress[0]
sxy = self.applied_stress[2]
r = self.r
return sxy * r / 2 - 1j * sx * r / 2
def phi_1(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates the first stress function
Notes
-----
This method implements Eq. A.6 [1]_
.. math:: C_1=\frac{\beta_1-\mu_2\alpha_1}{\mu_1-\mu_2}
.. math:: \Phi_1=\frac{C_1}{\xi_1}
Parameters
----------
z1 : ndarray
1D complex array first mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_1, sign_1 = self.xi_1(z1)
C1 = (beta - mu2 * alpha) / (mu1 - mu2)
return C1 / xi_1
def phi_2(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates the second stress function
Notes
-----
This method implements Eq. A.6 [1]_
.. math:: C_2=-\frac{\beta_1-\mu_1\alpha_1}{\mu_1-\mu_2}
.. math:: \Phi_2=\frac{C_2}{\xi_2}
Parameters
----------
z2 : ndarray
1D complex array second mapping parameter
Returns
-------
ndarray
1D complex array
"""
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_2, sign_2 = self.xi_2(z2)
C2 = -(beta - mu1 * alpha) / (mu1 - mu2)
return C2 / xi_2
def phi_1_prime(self, z1: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates derivative of the first stress function
Notes
-----
This method implements Eq. A.8 [1]_
.. math:: C_1=\frac{\beta_1-\mu_2\alpha_1}{\mu_1-\mu_2}
.. math:: \eta_1=\pm\sqrt{z_1^2-a^2-\mu_1^2b^2}
.. math:: \kappa_1=\frac{1}{a-i\mu_1b}
.. math:: \Phi_1'=-\frac{C_1}{\xi_1^2}(1+\frac{z_1}{\eta_1})\kappa_1
Parameters
----------
z1 : ndarray
1D complex array first mapping parameter
Returns
-------
ndarray
1D complex array
"""
a = self.r
b = self.r
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_1, sign_1 = self.xi_1(z1)
C1 = (beta - mu2 * alpha) / (mu1 - mu2)
eta1 = sign_1 * np.sqrt(z1 * z1 - a * a - mu1 * mu1 * b * b)
kappa1 = 1 / (a - 1j * mu1 * b)
return -C1 / (xi_1 ** 2) * (1 + z1 / eta1) * kappa1
def phi_2_prime(self, z2: NDArray[Any, complex]) -> NDArray[Any, complex]:
r"""Calculates derivative of the second stress function
Notes
-----
This method implements Eq. A.8 [1]_
.. math:: C_2=-\frac{\beta_1-\mu_1\alpha_1}{\mu_1-\mu_2}
.. math:: \eta_2=\pm\sqrt{z_2^2-a^2-\mu_2^2b^2}
.. math:: \kappa_2=\frac{1}{a-i\mu_2b}
.. math:: \Phi_2'=-\frac{C_2}{\xi_2^2}(1+\frac{z_2}{\eta_2})\kappa_2
Parameters
----------
z2 : ndarray
1D complex array second mapping parameter
Returns
-------
ndarray
1D complex array
"""
a = self.r
b = self.r
mu1 = self.mu1
mu2 = self.mu2
alpha = self.alpha()
beta = self.beta()
xi_2, sign_2 = self.xi_2(z2)
C2 = -(beta - mu1 * alpha) / (mu1 - mu2)
eta2 = sign_2 * np.sqrt(z2 * z2 - a * a - mu2 * mu2 * b * b)
kappa2 = 1 / (a - 1j * mu2 * b)
return -C2 / (xi_2 ** 2) * (1 + z2 / eta2) * kappa2
def stress(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 3), float]:
r""" Calculates the stress at (x, y) points in the plate
Parameters
----------
x : array_like
1D array x locations in the cartesian coordinate system
y : array_like
1D array y locations in the cartesian coordinate system
Returns
-------
ndarray
[[sx0, sy0, sxy0], [sx1, sy1, sxy1], ... , [sxn, syn, sxyn]]
(n, 3) in-plane stress components in the cartesian coordinate system
"""
sx, sy, sxy = super().stress(x, y).T
sx_app = self.applied_stress[0]
sy_app = self.applied_stress[1]
sxy_app = self.applied_stress[2]
return np.array([sx + sx_app, sy + sy_app, sxy + sxy_app]).T
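# Illustrative usage (assumes an `a_inv` inverse CLPT A-matrix is available;
# for a quasi-isotropic layup the stress at the net-section point (0, r)
# approaches the classical concentration factor of ~3 times the applied stress):
#
#   hole = UnloadedHole(loads=[100., 0., 0.], diameter=0.25,
#                       thickness=0.1, a_inv=a_inv)
#   sx, sy, sxy = hole.stress([0.], [0.125]).T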
def _remove_bad_displacments(displacement_func:
Callable[[object, NDArray[Any, float], NDArray[Any, float]], NDArray[(Any, 2), float]]):
""" removes displacements that are 180 degrees behind bearing load direction"""
def inner(self, x: NDArray[Any, float], y: NDArray[Any, float]) -> NDArray[(Any, 2), float]:
# call displacement function
displacements = displacement_func(self, x, y)
# check if any points are 180 degrees behind bearing load
r, angles = self._cartesian_to_polar(x, y)
bad_angle = np.pi if self.theta == 0 else -1*(np.pi - self.theta)
# if so, replace those results with np.nan
displacements[np.isclose(angles, bad_angle)] = np.nan
return displacements
return inner
# -*- coding: utf-8 -*-
import numpy as np
from ..base import Layer
from ztlearn.utils import clip_gradients as cg
from ztlearn.dl.initializers import InitializeWeights as init
from ztlearn.dl.activations import ActivationFunction as activate
from ztlearn.dl.optimizers import OptimizationFunction as optimizer
class GRU(Layer):
def __init__(self, h_units, activation = 'tanh', input_shape = None, gate_activation = 'sigmoid'):
self.h_units = h_units # number of hidden states
self.activation = activation
self.input_shape = input_shape
self.gate_activation = gate_activation
self.init_method = None # just added
self.optimizer_kwargs = None # just added
# gate weights
self.W_update = None
self.W_reset = None
self.W_states = None
# gate bias
self.b_update = None
self.b_reset = None
self.b_states = None
# final output to nodes weights
self.W_final = None
# final output to nodes bias
self.b_final = None
def prep_layer(self):
_, input_dim = self.input_shape
z_dim = self.h_units + input_dim # concatenate (h_units, vocabulary_size) vector
# gate weights
self.W_update = init(self.init_method).initialize_weights((z_dim, self.h_units))
self.W_reset = init(self.init_method).initialize_weights((z_dim, self.h_units))
self.W_cell = init(self.init_method).initialize_weights((z_dim, self.h_units))
self.W_states = init(self.init_method).initialize_weights((z_dim, self.h_units))
# gate hidden bias
self.b_update = np.zeros((self.h_units,))
self.b_reset = np.zeros((self.h_units,))
self.b_cell = np.zeros((self.h_units,))
self.b_states = np.zeros((self.h_units,))
# final output to nodes weights (input_dim is the vocab size and also the output size)
self.W_final = init(self.init_method).initialize_weights((self.h_units, input_dim))
# final output to nodes bias (input_dim is the vocab size and also the output size)
self.b_final = np.zeros((input_dim,))
@property
def weight_initializer(self):
return self.init_method
@weight_initializer.setter
def weight_initializer(self, init_method):
self.init_method = init_method
@property
def weight_optimizer(self):
return self.optimizer_kwargs
@weight_optimizer.setter
def weight_optimizer(self, optimizer_kwargs = {}):
self.optimizer_kwargs = optimizer_kwargs
@property
def layer_activation(self):
return self.activation
@layer_activation.setter
def layer_activation(self, activation):
self.activation = activation
@property
def output_shape(self):
return self.input_shape
def pass_forward(self, inputs, train_mode = True):
self.inputs = inputs
batch_size, time_steps, input_dim = inputs.shape
self.update = np.zeros((batch_size, time_steps, self.h_units))
self.reset = np.zeros((batch_size, time_steps, self.h_units))
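# (The source file truncates here; a standard GRU forward pass would continue
# with the update equations below -- an assumption, not the original code:)
#
#   z_t = sigmoid([h_{t-1}, x_t] @ W_update + b_update)
#   r_t = sigmoid([h_{t-1}, x_t] @ W_reset  + b_reset)
#   c_t = tanh([r_t * h_{t-1}, x_t] @ W_cell + b_cell)
#   h_t = (1 - z_t) * h_{t-1} + z_t * c_t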
import os
import time
import math
import json
import codecs
import argparse  # Python's built-in command-line argument parsing package
import numpy as np
from tqdm import tqdm
from numpy import finfo
from sklearn.metrics import accuracy_score
from transformers import BertTokenizer
from pytorch_pretrained_bert import BertModel
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.autograd import Variable
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from distributed import apply_gradient_allreduce
import parse_nk
from models import G2PTransformerMask, poly_tonesandhi, Cascaded_Tacotron2
from data_utils import TextMelLoader, TextMelCollate, G2PDatasetMask, get_dataloader, polyTTS_get_dataloader
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
def reduce_tensor(tensor, n_gpus): ##?
rt = tensor.clone()  # returns a copy of the tensor with the same size and dtype as the original
dist.all_reduce(rt, op=dist.reduce_op.SUM)  # sums the tensor across all processes; afterwards it is bitwise identical everywhere
rt /= n_gpus  # in-place division: rt = rt / n_gpus
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
# checks that CUDA is available; CUDA (Compute Unified Device Architecture) is NVIDIA's parallel computing platform
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# torch.cuda.set_device(device) sets the current device. Its use is discouraged; in most cases the CUDA_VISIBLE_DEVICES environment variable is preferable. device (int): the selected device; if negative, the call is a no-op.
# torch.cuda.device_count() returns the number of available GPUs
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
# PyTorch distributed training:
# backend (str/Backend): communication backend -- 'nccl', 'gloo', or a torch.distributed.Backend class (e.g. Backend.GLOO)
# init_method (str): URL specifying how the communicating processes initialize each other
# world_size (int): total number of processes taking part in training
# rank (int): index (and priority) of this process
# group_name (str): name of the group this process belongs to
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
# "if not" usage: the body executes only when the condition is False
if not hparams.load_mel_from_disk:
trainset = TextMelLoader(hparams.training_files, hparams.polyphone_dict_files, hparams.mask_dict_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams.polyphone_dict_files, hparams.mask_dict_files, hparams)
else:
trainset = TextMelLoader(hparams.mel_training_files, hparams.polyphone_dict_files, hparams.mask_dict_files, hparams)
valset = TextMelLoader(hparams.mel_validation_files, hparams.polyphone_dict_files, hparams.mask_dict_files, hparams)
collate_fn = TextMelCollate(hparams.n_frames_per_step, hparams.num_classes)
if hparams.distributed_run: ##False
train_sampler = DistributedSampler(trainset)
# For distributed training across multiple nodes/GPUs, each GPU should read different data; the sampler ensures each dataloader only loads a specific subset of the full dataset.
# It assigns a portion of the dataset to each subprocess, avoiding data duplication between processes.
shuffle = False
else:
train_sampler = None
shuffle = True
# build an iterable data loader
train_loader = DataLoader(trainset, num_workers=0, shuffle=shuffle,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
# dataset (a Dataset subclass that decides where and how data is read), batch_size (samples per batch), shuffle (whether to reshuffle each epoch)
# num_workers (number of subprocesses used when loading data), drop_last (whether to drop the final batch when the sample count is not divisible by batch_size)
return train_loader, valset, collate_fn
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
else:
logger = None
return logger
def load_model(hparams):
model = Cascaded_Tacotron2(hparams).cuda()  # takes hparams; .cuda() is available because the model subclasses nn.Module
if hparams.fp16_run: ## False
model.decoder.attention_layer.score_mask_value = finfo('float16').min ##?
if hparams.distributed_run: ## False
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=0,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
val_mel_loss = 0.0
val_gate_loss = 0.0
val_select_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
y_pred = model(x)
# original_words = x[3]
# # print('CHECK original_words IN validate:', original_words)
# _, _, select_target = y
# select_target = np.array(select_target.cpu())
# # print('CHECK select_target IN validate:', select_target)
# np.savetxt('select_target.txt',select_target)
# _, _, _, _, select_pred = y_pred
# select_pred = np.array(select_pred.cpu())
# select_pred = np.argmax(select_pred, axis=2)
# # print('CHECK select_pred IN validate:', select_pred)
# np.savetxt('select_pred.txt',select_pred)
# mask_padded_to_show = np.array(mask_padded.cpu())
# mask_padded_to_show = np.sum(mask_padded_to_show, axis=2)
# # print('CHECK mask_padded_to_show IN validate:', mask_padded_to_show)
# np.savetxt('select_mask.txt',mask_padded_to_show)
mask_padded = x[3]
loss, mel_loss, gate_loss, select_loss = criterion(y_pred, y, mask_padded)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
reduced_val_mel_loss = reduce_tensor(mel_loss.data, n_gpus).item()
reduced_val_gate_loss = reduce_tensor(gate_loss.data, n_gpus).item()
reduced_val_select_loss = reduce_tensor(select_loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
reduced_val_mel_loss = mel_loss.item()
reduced_val_gate_loss = gate_loss.item()
reduced_val_select_loss = select_loss.item()
val_loss += reduced_val_loss
val_mel_loss += reduced_val_mel_loss
val_gate_loss += reduced_val_gate_loss
val_select_loss += reduced_val_select_loss
val_loss = val_loss / (i + 1)
val_mel_loss = val_mel_loss / (i + 1)
val_gate_loss = val_gate_loss / (i + 1)
val_select_loss = val_select_loss / (i + 1)
model.train()
if rank == 0:
print("Validation loss {}: {:9f} ".format(iteration, val_loss))
logger.log_validation(val_loss, val_mel_loss, val_gate_loss, val_select_loss, model, y, y_pred, iteration)
def train_tts(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
log_directory (string) directory to save tensorboard logs
checkpoint_path(string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): comma separated list of "name=value" pairs.
"""
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
torch.manual_seed(hparams.seed)  # seed the CPU RNG so reruns of the same code produce identical random numbers
torch.cuda.manual_seed(hparams.seed)  # seed the current GPU's RNG; torch.cuda.manual_seed_all(seed) would seed every GPU
# Setting the seed manually fixes the randomly initialized weights, so each fresh training run starts from the same (random but reproducible) initial values.
model = load_model(hparams)
learning_rate = hparams.learning_rate
# optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
# weight_decay=hparams.weight_decay)
for name, param in model.named_parameters():
# frozen except tts
# if name.split('.')[0] == 'poly_phoneme_classifier':
# param.requires_grad = False
# frozen poly module except tone sandhi & tts
# if name.split('.')[0] == 'poly_phoneme_classifier':
# if name.split('.')[1] != 'linear_pre' and name.split('.')[1] != 'conv_layers' and name.split('.')[1] != 'linear_aft':
# param.requires_grad = False
# frozen except structure CNN & tonesandhi & tts
if name.split('.')[0] == 'poly_phoneme_classifier':
if name.split('.')[1] == 'g2ptransformermask':
if name.split('.')[2] != 'structure_cnn_tts':
param.requires_grad = False
elif name.split('.')[1] != 'linear_pre' and name.split('.')[1] != 'conv_layers' and name.split('.')[1] != 'linear_aft':
param.requires_grad = False
# else:
# param.requires_grad = False
training_parameters_list = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(training_parameters_list, lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
# apex is NVIDIA's mixed-precision training extension for PyTorch; a few lines of code enable different levels of mixed-precision acceleration, roughly halving training time.
# fp16: half-precision floating point, a binary float type stored in 2 bytes (16 bits).
# fp16 pros: lower GPU memory use, faster training and inference, wide tensor-core support. Con: quantization error.
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(
checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
is_overflow = False
# ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
start = time.perf_counter()  # high-resolution timer used to measure this step's duration
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
# print('CHECK batch:', batch)
model.zero_grad()
x, y = model.parse_batch(batch)
y_pred = model(x)
mask_padded = x[3]
loss, mel_loss, gate_loss, select_loss = criterion(y_pred, y, mask_padded) ## Tacotron2Loss(model_output,targets,mask_padded)
# distinguish the individual loss terms
if hparams.distributed_run:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
reduced_val_mel_loss = reduce_tensor(mel_loss.data, n_gpus).item()
reduced_val_gate_loss = reduce_tensor(gate_loss.data, n_gpus).item()
reduced_val_select_loss = reduce_tensor(select_loss.data, n_gpus).item()
else:
reduced_loss = loss.item()
reduced_val_mel_loss = mel_loss.item()
reduced_val_gate_loss = gate_loss.item()
reduced_val_select_loss = select_loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# print('CHECK structure_cnn.convs.0.weight IS CHANGE:', model.structure_cnn.convolutions[0][0].conv.weight)
if hparams.fp16_run:
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), hparams.grad_clip_thresh)
is_overflow = math.isnan(grad_norm)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
# A typical PyTorch training loop calls optimizer.zero_grad(), loss.backward(), and optimizer.step() in turn each iteration:
# zero the gradients, backpropagate to compute every parameter's gradient, then take one gradient-descent update step.
if not is_overflow and rank == 0:
duration = time.perf_counter() - start  # only the difference between two perf_counter() calls is meaningful; used here to time the iteration
print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration))
logger.log_training(
reduced_loss, reduced_val_mel_loss, reduced_val_gate_loss, reduced_val_select_loss, grad_norm, learning_rate, duration, iteration)
if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0):
validate(model, criterion, valset, iteration,
hparams.batch_size, n_gpus, collate_fn, logger,
hparams.distributed_run, rank)
if rank == 0:
checkpoint_path = os.path.join(
output_directory, "checkpoint_{}".format(iteration))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
class Mask_Softmax(nn.Module):
def __init__(self, plus=1.0):
super(Mask_Softmax, self).__init__()
self.plus = plus
def forward(self, logits, output_mask):
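# Adding log(mask + 1e-45) leaves admissible logits unchanged (log 1 = 0) and
# shifts masked-out logits by log(1e-45) ~= -103.6, i.e. effectively -inf
# before the log-softmax.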
logits = logits + (output_mask + 1e-45).log()
return torch.nn.functional.log_softmax(logits, dim=-1)
class Gumbel_Softmax(nn.Module):
def __init__(self, temperature=1):
super(Gumbel_Softmax, self).__init__()
self.softmax = nn.Softmax(dim=-1)  # softmax over the last dimension
# initial temperature for gumbel softmax (default: 1)
self.temperature = temperature
self.mask_softmax = Mask_Softmax()
# self.mask_softmax = nn.LogSoftmax()
def forward(self, logits, output_mask, hard=False):
y = self._gumbel_softmax_sample(logits, output_mask, hard)
return y
def _sample_gumbel(self, shape, eps=1e-20):
U = torch.rand(shape)
return -torch.log(-torch.log(U + eps) + eps)
def _gumbel_softmax_sample(self, logits, output_mask, hard=False):
sample = Variable(self._sample_gumbel(logits.size()[-1]), requires_grad=True)
if logits.is_cuda:
sample = sample.cuda()
y = logits + sample
# return self.softmax(y / self.temperature)
y_soft = self.mask_softmax(y / self.temperature, output_mask)
# y_soft = self.mask_softmax(y / self.temperature)
if hard:
# Straight through.
index = y_soft.max(-1, keepdim=True)[1]
y_hard = torch.zeros_like(logits).scatter_(-1, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
# Reparametrization trick.
ret = y_soft
return ret
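# Illustrative usage (assumes logits of shape (batch, num_classes) and a 0/1
# output_mask of the same shape marking the admissible classes):
#
#   gs = Gumbel_Softmax(temperature=1)
#   y_soft = gs(logits, output_mask)             # relaxed log-probabilities
#   y_hard = gs(logits, output_mask, hard=True)  # one-hot, straight-through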
def masked_augmax(logits, mask, dim, min_val=-1e7):
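# logits arrive as log-probabilities (from Mask_Softmax): exponentiate back to
# probabilities, zero out inadmissible classes, then take the argmax over the
# class dimension. Note the `dim` and `min_val` parameters are currently unused.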
logits = logits.exp()
logits = logits.mul(mask)
# one_minus_mask = (1.0 - mask).byte()
# replaced_vector = vector.masked_fill(one_minus_mask, min_val)
# max_value, _ = replaced_vector.max(dim=dim)
max_value = torch.argmax(logits, dim=1)
return max_value
def train_poly(args, hparams):
torch.manual_seed(42)
torch.cuda.manual_seed(42)
np.random.seed(42)
print('CHECK HERE train poly ONLY')
train_dataloader = get_dataloader(hparams.use_output_mask, hparams.train_file, hparams.train_label,
hparams, hparams.poly_batch_size,
hparams.poly_max_length, shuffle=True)
val_dataloader = get_dataloader(hparams.use_output_mask, hparams.val_file, hparams.val_label,
hparams, hparams.poly_batch_size,
hparams.poly_max_length, shuffle=True)
# test_dataloader = get_dataloader(args.use_output_mask, args.test_file, args.test_label,
# args.class2idx, args.merge_cedict, args.poly_batch_size,
# args.max_length, shuffle=True)
with codecs.open(hparams.class2idx, 'r', 'utf-8') as usernames:
class2idx = json.load(usernames)
print("num classes: {}".format(len(class2idx)))
num_classes = len(class2idx)
model = G2PTransformerMask(num_classes, hparams)
device = torch.cuda.current_device()  # index of the GPU currently in use
model = model.to(device)  # move the model onto that device
for name, param in model.named_parameters():
# frozen syntax module
if name.split('.')[0] != 'tree_shared_linear' and name.split('.')[0] != 'structure_cnn_poly' \
and name.split('.')[0] != 'linear_pre' and name.split('.')[0] != 'poly_phoneme_classifier' \
and name.split('.')[0] != 'linear_aft':
param.requires_grad = False
training_parameters_list = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(training_parameters_list, lr=hparams.poly_lr)
criterion = nn.NLLLoss()
# mask_criterion = Mask_Softmax()
mask_criterion = Gumbel_Softmax()
model_dir = "./save/poly_only_syntax_frozen"
if not os.path.exists(model_dir):
os.makedirs(model_dir)
best_acc = 0
for epoch in range(hparams.poly_epochs):
model.train()
for idx, batch in enumerate(train_dataloader, start=1):
# print('CEHCK batch:', batch)
# if idx > 200:
# break
batch = tuple(t.to(device) for t in batch)
if hparams.use_output_mask:
input_ids, poly_ids, labels, output_mask = batch
mask = torch.sign(input_ids)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
else:
input_ids, poly_ids, labels = batch
mask = torch.sign(input_ids)  # torch.sign returns a new tensor with the sign of each element (1 for >0, -1 for <0, 0 unchanged)
inputs = {"input_ids": input_ids,
"poly_ids": poly_ids,
"attention_mask": mask}
# inputs = {"input_ids": input_ids,
# "poly_ids": poly_ids,
# "attention_mask": mask}
logits, _ = model(**inputs)
batch_size = logits.size(0)
logits = logits[torch.arange(batch_size), poly_ids]
# logits = mask_criterion(logits, output_mask, True)
logits = mask_criterion(logits, output_mask)
loss = criterion(logits, labels)
loss.backward()
# nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
model.zero_grad()
if idx % 100 == 0:  # % is the modulo operator
print("loss : {:.4f}".format(loss.item()))
all_preds = []
all_mask_preds = []
all_labels = []
model.eval()
for batch in tqdm(val_dataloader, total=len(val_dataloader)):
batch = tuple(t.to(device) for t in batch)
            if hparams.use_output_mask:
                input_ids, poly_ids, labels, output_mask = batch
            else:
                input_ids, poly_ids, labels = batch
                output_mask = None
            mask = torch.sign(input_ids)
            inputs = {"input_ids": input_ids,
                      "poly_ids": poly_ids,
                      "attention_mask": mask}
            with torch.no_grad():
                logits, _ = model(**inputs)
                batch_size = logits.size(0)
                logits = logits[torch.arange(batch_size), poly_ids]
                logits = mask_criterion(logits, output_mask)
            preds = torch.argmax(logits, dim=1).cpu().numpy()
            mask_preds = masked_augmax(logits, output_mask, dim=1).cpu().numpy()
            # Sanity check: plain argmax and mask-restricted argmax should
            # agree once the masked softmax has zeroed the invalid classes.
            if not (preds == mask_preds).all():
                print('CHECK preds:', preds)
                print('CHECK mask_preds:', mask_preds)
                print('CHECK labels:', labels)
                print('CHECK output_mask:', np.where(output_mask.cpu().numpy() == 1.0))
            all_preds.append(preds)
            all_mask_preds.append(mask_preds)
            all_labels.append(labels.cpu().numpy())
        preds = np.concatenate(all_preds, axis=0)
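        # The original file is truncated here. Below is a minimal sketch of
        # the usual epoch-end bookkeeping (validation accuracy plus saving the
        # best checkpoint); the metric and file name are assumptions, not
        # taken from the source.
        mask_preds = np.concatenate(all_mask_preds, axis=0)
        labels = np.concatenate(all_labels, axis=0)
        acc = float((preds == labels).mean())
        print("epoch {}: val acc {:.4f} (masked acc {:.4f})".format(
            epoch, acc, float((mask_preds == labels).mean())))
        if acc > best_acc:
            best_acc = acc
            torch.save(model.state_dict(),
                       os.path.join(model_dir, "best_model.pt"))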
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
Test suite for the util.py module.
The tests must be linked with a function space class object in the setUp method; to run them use:
to run the use:
from esys.bruce import Brick
class Test_utilOnBruce(Test_util_no_tagged_data):
def setUp(self):
self.domain = Brick(10,10,13)
self.functionspace = ContinuousFunction(self.domain)
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(Test_utilOnBruce))
unittest.TextTestRunner(verbosity=2).run(suite)
This test assumes that samples with x_0 coordinate 0 are tagged with 1 and all samples tagged with 1 have x_0
coordinate 0.
:note: at this stage this test will not pass as it tests for functionality that has not been implemented yet. It also
does not test the full functionality of util.py yet.
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
import esys.escriptcore.utestselect as unittest
import numpy
from esys.escript import *
from test_util_base import Test_util_base, Test_util_values
from test_util_reduction_new import Test_util_reduction_new
from test_util_unary_new import Test_util_unary_new
from test_util_binary_new import Test_util_binary_new
from test_util_binary_leftover import Test_util_binary_leftover
## these aspects are tested in the _new tests
#from test_util_overloaded_binary_no_tagged_data import Test_util_overloaded_binary_no_tagged_data
#from test_util_overloaded_binary_with_tagged_data import Test_util_overloaded_binary_with_tagged_data
#from test_util_unary_no_tagged_data import Test_util_unary_no_tagged_data
#from test_util_unary_with_tagged_data import Test_util_unary_with_tagged_data
#from test_util_binary_no_tagged_data import Test_util_binary_no_tagged_data
#from test_util_binary_with_tagged_data import Test_util_binary_with_tagged_data
from test_util_spatial_functions1 import Test_Util_SpatialFunctions_noGradOnBoundary_noContact
from test_util_spatial_functions2 import Test_Util_SpatialFunctions_noGradOnBoundary
from test_util_spatial_functions3 import Test_Util_SpatialFunctions
from test_util_slicing_no_tagged_data import Test_util_slicing_no_tagged_data
from test_util_slicing_with_tagged_data import Test_util_slicing_with_tagged_data
class Test_util_reduction(Test_util_reduction_new):
""" test for reduction operation Lsup,sup,inf for all data types"""
pass
class Test_util_unary(Test_util_unary_new):
""" all unary tests """
pass
class Test_util_binary(Test_util_binary_new, Test_util_binary_leftover):
"""
test for all binary operation
"""
pass
## Testing of these ops is now in Test_util_binary
#class Test_util_overloaded_binary(Test_util_overloaded_binary_no_tagged_data,Test_util_overloaded_binary_with_tagged_data):
#"""test for all overloaded operation"""
#pass
class Test_util(Test_util_unary_new,Test_util_reduction_new, Test_util_binary):
"""all tests"""
pass
class Test_util_overloaded_binary_still_failing(Test_util_base):
"""
these overloaded operations still fail!
- wrong return value of Data binaries (Mantis 0000054)
"""
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.93686078973,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.51662736235119944, 2.8171396846123073])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.4202334273802917, -2.1197211051191838]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(-2.22764991169,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[2.0746979587719538, 0.99992890307042437, -2.3128078094931848, -4.0103712739722654,
4.8853529531011013],
[0.09856857946648212, 0.73520899085847624, -3.6585265509750844, 3.0095320582437939, 3.4125902906059444],
[1.4894150898632059,
-1.4124339049368793, 1.5397397961722188, 4.8841402613336111, 1.1241155288598881], [2.8283598865494408,
1.5980765295723476,
-1.0022373011497274, -2.0622178471715067, 4.9699555072046042]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.15295195292152819, -1.2277210086230577, -4.5404577211866668, -6.2380211856657475,
2.6577030414076193],
[-2.1290813322269999, -1.4924409208350058, -5.8861764626685664, 0.78188214655031185, 1.1849403789124624],
[-0.73823482183027611,
-3.6400838166303613, -0.68791011552126324, 2.6564903496401291, -1.103534382833594], [0.60070997485595878,
-0.62957338212113445,
-3.2298872128432095, -4.2898677588649887, 2.7423055955111222]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(-4.67318656609,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.9409337165894076, 1.6101568824796857], [1.2441782896909706, 1.2872758759353298]],
[[4.022494973005406,
-2.758155583474049], [1.8311643900357311, 4.0940647266277157]], [[2.5378127449303243, 0.063283784588161751],
[4.5495644157820809,
2.8673770080506742]], [[-0.93484143473477577, 4.914438575705228], [-1.951066895455166, -1.2021165219313259]],
[[-0.4220608661301819, -4.9682501775464418], [0.98338081352961559, 3.4054674805751066]], [[3.9967556325744127,
-4.7659141789100659],
[0.34265275409881024, -0.25226631819007572]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-0.73225284950136693, -3.0630296836110888], [-3.429008276399804, -3.3859106901554448]],
[[-0.6506915930853685, -7.4313421495648235], [-2.8420221760550435, -0.57912183946305884]],
[[-2.1353738211604503,
-4.6099027815026128], [-0.12362215030869361, -1.8058095580401003]], [[-5.6080280008255503,
0.24125200961445348],
[-6.6242534615459405, -5.8753030880221004]], [[-5.0952474322209564, -9.6414367436372164],
[-3.6898057525611589,
-1.2677190855156679]], [[-0.67643093351636185, -9.4391007450008395], [-4.3305338119919643,
-4.9254528842808503]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(4.16645075056,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.5917180025121436, -0.50082927718401749, 0.71261274386013618, 2.4216324938382936],
[2.5988764746053095,
0.15985324844397741, -2.1952754277135025, -2.1102730593254035], [4.7816092243808672, -3.1240954141765496,
4.0831220997721331, 2.4301203557965216]], [[3.4691826046114969, -2.4961081730013177, -4.9623977358253111,
2.2652744558918698],
[0.41830032681767193, -3.2186897293959649, -4.1590967541108324, -1.7789994379155196], [-0.17901184206486764,
-0.85223673399918809, 1.2515459884606104, -4.530305999148645]]], [[[-4.9028671865135838, 3.9106181278983012,
0.69716765577825246, 4.8537569187159395], [-2.8912890367657318, -4.8177854256421764, -4.3303142092509415,
-0.99481907472179198], [-1.2640734452454305, 4.8028129765204639, -2.5491771511234962, 3.2550469051981921]],
[[2.0572417475748761, 3.7392706991121187, 4.5778678295843704, 3.6658188498258486], [-2.7069743698567206,
-2.684769111460461, -3.0941141983763156, -2.1180719361316589], [-1.4744678905986119, 1.926687036555828,
2.2206999030392947, 0.72956973127168734]]], [[[-2.8290294475300151, -3.1467788245496631, 3.6471044178360348,
3.5237454065241209], [-1.6165850845596652, 1.2437746199742081, -2.8022357261752004, -1.9652183524467781],
[-2.3842126490032092, 3.7068998814751613, -1.389546865398994, -1.7153758702474589]], [[-1.0746517242894815,
-4.3575382718398723, 0.93160793707280121, 1.4002531109392731], [-1.5745690740270168, -3.4394046042905124,
4.2641517580348793, -1.7620679696550843], [-4.2559205627171135, 2.1912319337278863, 1.1987265764805723,
-3.2957352772592809]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.7581687530761378, 3.6656214733799768, 4.8790634944241305, 6.5880832444022879],
[6.7653272251693037, 4.3263039990079717, 1.9711753228504918, 2.0561776912385907], [8.9480599749448615,
1.0423553363874447, 8.2495728503361274, 6.5965711063605159]], [[7.6356333551754911, 1.6703425775626766,
-0.7959469852613168, 6.4317252064558641], [4.5847510773816662, 0.94776102116802941, 0.0073539964531619262,
2.3874513126484747], [3.9874389084991266, 3.3142140165648062, 5.4179967390246047, -0.36385524858465068]]],
[[[-0.7364164359495895, 8.0770688784622955, 4.8636184063422467, 9.0202076692799338], [1.2751617137982625,
-0.6513346750781821, -0.16386345868694718, 3.1716316758422023], [2.9023773053185637, 8.9692637270844582,
1.6172735994404981, 7.4214976557621863]], [[6.2236924981388704, 7.905721449676113, 8.7443185801483647,
7.8322696003898429], [1.4594763807072737, 1.4816816391035332, 1.0723365521876786, 2.0483788144323354],
[2.6919828599653823, 6.0931377871198222, 6.3871506536032889, 4.8960204818356816]]], [[[1.3374213030339792,
1.0196719260143312, 7.8135551684000291, 7.6901961570881152], [2.5498656660043291, 5.4102253705382024,
1.3642150243887938, 2.2012323981172162], [1.7822381015607851, 7.8733506320391555, 2.7769038851650003,
2.4510748803165354]], [[3.0917990262745128, -0.19108752127587803, 5.0980586876367955, 5.5667038615032673],
[2.5918816765369774, 0.72704614627348185, 8.4306025085988736, 2.40438278090891], [-0.089469812153119221,
6.3576826842918805, 5.3651773270445666, 0.87071547330471333]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.8454947431609945, 3.4801848055393254]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.181985677208)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([4.0274804203691783, 3.6621704827475092]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([2.6719646801005306, 4.0262173014652003]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.7355891147806837, -3.0309968912239551])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([6.4075537948812142, 0.99522041024124519]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.209887477038702, 2.087043312051243, 3.7254247294014622,
-3.7510652436671732, 0.70343608099575317], [4.1654611738215745, 1.5418518980850271,
2.7730022594684423, 3.386030420596251, 1.2758288509710365], [2.2174938185138764,
-1.244837837360393, 2.2331288285078887, -1.1442348969501834, 1.9394801392868004],
[0.68612447219195705, 0.7127527031233436, -3.6346644102130776, 2.0671128943191714,
3.7445028703597156]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.82316401579)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.0330514928326018, 6.9102073278451428, 8.5485887451953619,
1.0720987721267266, 5.5266000967896529], [8.9886251896154743, 6.3650159138789268,
7.596166275262342, 8.2091944363901508, 6.0989928667649362], [7.0406578343077761,
3.5783261784335068, 7.0562928443017885, 3.6789291188437163, 6.7626441550807002],
[5.5092884879858568, 5.5359167189172434, 1.1884996055808221, 6.8902769101130712,
8.5676668861536154]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-3.62961836797558, 4.0323249470469893, -2.4833229912823516,
-0.0081902035785272886, -0.26448613257378906], [2.0867535529248489, 0.049446344294963751,
4.4906317789174501, 2.6121865600043499, 1.3687146632565392], [4.2509170325103511,
2.9845191554148567, -0.9329820582137387, -0.58236994049271118, -3.4448732067194388],
[-2.3231599587033402, 1.6550934434842866, -4.5990521452319584, -2.1470268566500152,
-3.9698084155531008]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[3.3234017918244003, 3.3386199217996175, -2.5928786077225316,
-4.1429140632213803, 0.42204291369978719], [3.4123580113357495, -3.9076190537235664,
1.8779298531672159, 0.98377543853039562, -4.9365820051249267], [4.5252395032935961,
-4.8193051910732096, 1.060979071451845, -3.2927325266544871, -3.3828356655691971],
[-4.6411804903406182, -0.42921544747540707, -2.4541073523344323, -0.70845691989162329,
-1.2357505826155588]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-0.3062165761511797, 7.3709448688466068, -5.0762015990048832,
-4.1511042667999076, 0.15755678112599814], [5.4991115642605983, -3.8581727094286027,
6.3685616320846661, 3.5959619985347455, -3.5678673418683875], [8.7761565358039473,
-1.834786035658353, 0.12799701323810631, -3.8751024671471983, -6.8277088722886354],
[-6.9643404490439584, 1.2258779960088795, -7.0531594975663907, -2.8554837765416385,
-5.2055589981686596]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-2.0819775543023136, 4.4438294149957258], [1.203494127071604,
1.3934659764012478]], [[-1.7207192546012995, 1.128687542370864], [1.013953229943537,
2.0535582502969056]], [[-1.8482126685735398, 0.64499519705235819],
[-4.1200947648310313, 3.8041018736261574]], [[-0.12876390427677542, -0.26859118353213773],
[-2.8945993824974847, -3.3476923883525944]], [[3.1332107854705562, -4.6334666373330595],
[3.0499420638074994, -2.7959034777693104]], [[4.726734207260332, -1.3724501610660034],
[3.3499737674080023, -2.515294322458935]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.860178486532)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.2217990677700952, 5.3040079015279442], [2.0636726136038224,
2.2536444629334662]], [[-0.86054076806908109, 1.9888660289030824], [1.8741317164757554,
2.913736736829124]], [[-0.98803418204132143, 1.5051736835845766], [-3.2599162782988129,
4.6642803601583758]], [[0.73141458225544298, 0.59158730300008067], [-2.0344208959652663,
-2.487513901820376]], [[3.9933892720027746, -3.7732881508008411], [3.9101205503397178,
-1.935724991237092]], [[5.5869126937925504, -0.51227167453378497], [4.2101522539402207,
-1.6551158359267166]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-1.849788129717993, 0.64693319038907493], [3.0379670344950327,
0.80277076526299229]], [[2.4995340022105639, -4.3955703049125949], [0.58331276679079203,
0.044119077451267863]], [[2.2979922792046947, 1.6054844683234073], [0.50524258350986084,
-3.5539312710422779]], [[-1.1980433912188793, -2.6450000406046001], [-2.4128326188310121,
0.80678465051263526]], [[-2.9963692865064209, -1.0152803020104519], [-0.21931259441936035,
-1.153119362615751]], [[-4.2927186206837717, 0.4561872009236847], [3.0860876046130041,
-0.78568544768378068]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[-3.4985389035935222, 1.8888458641158987], [-4.2891085749380489,
2.8296217607019845]], [[-0.8200921678141917, 4.4359194831012676],
[-4.6185751325042244, 0.16520675598470014]], [[-2.801157092531934, 3.6231020804204928],
[1.5439760747845899, 2.0378140868272894]], [[0.99864930993784018, 3.369884315459073],
[4.399815205976239, -4.9546136700941936]], [[1.6240932313892289, -3.4517363344048615],
[2.8668483027947236, 1.1624090061600336]], [[2.6364367974081624, 2.628371373764919],
[-2.5877409052653833, -1.29236451403668]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-5.3483270333115147, 2.5357790545049737], [-1.2511415404430162,
3.6323925259649767]], [[1.6794418343963722, 0.040349178188672674],
[-4.0352623657134323, 0.209325833435968]], [[-0.50316481332723928, 5.2285865487439001],
[2.0492186582944507, -1.5161171842149885]], [[-0.19939408128103908, 0.72488427485447282],
[1.9869825871452269, -4.1478290195815584]], [[-1.372276055117192, -4.4670166364153134],
[2.6475357083753632, 0.0092896435442826331]], [[-1.6562818232756094,
3.0845585746886037], [0.49834669934762088, -2.0780499617204606]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[-0.026017904532606551, -0.80192450547405958,
0.93785799257835656, -4.4900007911078319], [-1.8444162073720949,
1.2059856695600812, 1.8326324480310756, 3.3745782356451564],
[3.0929324433706693, -0.94197156488767142, -2.3469684397851207,
-4.8976052662192613]], [[1.2658444546015346, 3.0389250549456399,
-2.567254770133963, 3.7513728753285314], [-0.10225306211433605,
-0.34121316520335299, -2.8745573331597321, -0.73976781968982142],
[4.6114590072566681, 3.5325642767850063, 2.1587079910040661,
3.8644723652636905]]], [[[-2.5953113243103623, 0.6437882672443429,
4.5677362343759853, 3.4108524985046262], [2.9904338528780352,
0.73113299006492127, 2.4253724263400445, 3.8646536702562031],
[-1.2545053686514152, -4.2675706218911706, -3.6576679389702105,
-0.29502287354943402]], [[0.9550527228483654, 2.9537233833481267,
-2.6904009310953283, 1.5998857010519698], [-3.7171702199982004,
-1.1578306702024044, 1.764070139728485, -1.1506068782808967],
[1.5727320181060982, 0.18468074769418674, 3.3262967055395372,
-1.2208265816075849]]], [[[-0.25003967903418278, -2.603663543909648,
4.6824047463125531, 1.0968919539473987], [1.3471700099604398,
-3.8321880437450218, -4.2809409903460676, 1.2933005361204906],
[-2.857251250328674, 3.6768205829450178, -2.7999953058490643,
2.1117422072666692]], [[-2.1994223710236427, 3.7669030216280923,
-3.5232105054852991, -3.7071480752824462], [-0.35952695279389246,
2.5451704526750873, -4.2842310996736144, -1.3813503044378783],
[-2.5647173415905145, 4.7437501634141572, -4.2234318870342245,
2.1862042652792866]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(0.33323555487)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[0.30721765033724147, -0.46868895060421156,
1.2710935474482046, -4.1567652362379839], [-1.5111806525022469,
1.5392212244299293, 2.1658680029009236, 3.7078137905150044],
[3.4261679982405173, -0.6087360100178234, -2.0137328849152727,
-4.5643697113494133]], [[1.5990800094713826, 3.3721606098154879,
-2.234019215264115, 4.0846084301983794], [0.23098249275551197,
-0.0079776103335049697, -2.541321778289884, -0.4065322648199734],
[4.9446945621265161, 3.8657998316548543, 2.4919435458739141,
4.1977079201335386]]], [[[-2.2620757694405143, 0.97702382211419092,
4.9009717892458333, 3.7440880533744743], [3.3236694077478832,
1.0643685449347693, 2.7586079812098925, 4.1978892251260511],
[-0.92126981378156714, -3.9343350670213226, -3.3244323841003625,
0.038212681320413999]], [[1.2882882777182134, 3.2869589382179747,
-2.3571653762254803, 1.9331212559218178], [-3.3839346651283524,
-0.82459511533255636, 2.097305694598333, -0.81737132341104868],
[1.9059675729759462, 0.51791630256403476, 3.6595322604093852,
-0.88759102673773693]]], [[[0.083195875835665234, -2.2704279890398,
5.0156403011824011, 1.4301275088172467], [1.6804055648302878,
-3.4989524888751737, -3.9477054354762195, 1.6265360909903386],
[-2.524015695458826, 4.0100561378148658, -2.4667597509792163,
2.4449777621365172]], [[-1.8661868161537947, 4.1001385764979403,
-3.1899749506154511, -3.3739125204125981], [-0.026291397924044446,
2.8784060075449354, -3.9509955448037664, -1.0481147495680303],
[-2.2314817867206664, 5.0769857182840052, -3.8901963321643764,
2.5194398201491346]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[1.6204760394819004, -0.95393695229398112, -1.221681223499369, 2.6618713903411937],
[-1.5387523541807724, 4.6220978399651482, -2.1795716817360713, -3.776821154104939], [1.4330066566763016,
3.7880327985429378, -0.65902727001966976, -4.29506128665055]], [[-4.0199255222547103, -3.644811287300751,
3.6508998060332054, -3.569704984460552], [-3.8429890733645489, -2.9119635791576437, 2.3183698092323652,
1.3643661323778851], [2.9328022056563725, -0.080129403375118535, 0.15566128013433289, 2.344258136058456]]],
[[[3.03272210358924, 2.8841814084596393, -4.059068204445289, -0.091640986980607408], [-4.2591024547151859,
-0.36305436045316863, 0.19284537915686428, 4.5041324479849649], [1.2988816365062537, -1.6778808169453416,
-3.5496975707176146, 4.314356820196215]], [[-1.4533462849506518, -1.003910808707118, 3.8948057966291092,
1.266066103629278], [-4.4119138102620346, -2.1246183047037603, -2.4610566322999161, -3.5862383252945271],
[2.9290698526446066, -0.26093763373887136, 0.87809331627623344, -0.47993365832407076]]], [[[2.1717793325666745,
0.83592896851733212, -2.2538107669063279, 1.6303402530881517], [-0.53207705017646578, -4.5214994998308979,
-3.6999121226789988, 3.5355643886671686], [3.3936340080223193, -2.1140030580705247, 1.821327452830638,
-1.6123768640462668]], [[2.3105165926895497, -3.0414367260786292, -1.5788704194425076, 1.0377969965556915],
[1.3575822980511116, 4.3465002873169833, 0.55678010189701688, 4.99079375906609], [4.2819911907361128,
4.9615031124625322, 2.7964852390480104, 0.029646894001982282]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[3.779495003239937, -4.7840877608643506, 2.651273004571375, -2.179381582597685],
[-0.27370078331190673, -3.6151069379138887, -0.6880481455894909, 4.4373993248198644], [-1.6276288613086387,
-1.6376839670015721, -3.1138607609774835, -2.7809800576738719]], [[0.85446276622548556, -4.3676040003341114,
-4.0083595770538496, -3.915065868011578], [1.6989039436984452, 3.5347026474299419, -1.8748410832866327,
-4.6526613314583045], [1.9480513434936046, 4.7386182205273322, -1.2001630607496541, 1.8094726084650006]]],
[[[4.9996435011863589, 0.60285036470010045, 1.457536438507919, 2.7443970579013879], [4.131864622110669,
0.20996245110639133, 3.3652305004680549, 3.1437873739212119], [-3.0818670302029405, -2.461603163946088,
-0.56609916674720218, -4.1186964404844861]], [[-2.7183232427482262, -2.1509712746053999, -2.281087666097271,
-2.4094567126275344], [-3.4723848022755091, -1.563218902128277, -4.7598832341275878, 1.8751725484288029],
[-4.0474621098792882, 0.59894943914858167, 1.0736279895120182, 4.5015525072725033]]], [[[-3.0082200796749703,
0.23283074563588535, 2.5230303985659734, 4.8262414779000231], [3.3772486493634837, 1.8234317033464915,
-1.7905158376185746, -2.9990918311449244], [-3.6765085717620041, 2.0057610304617572, -2.1487273241068525,
-4.1965541804451352]], [[0.26210933249566715, -2.9167787158271663, -0.89589477578380539, -0.41427249402553912],
[-3.1708181836677332, 4.3890602408555726, -1.1754542095914857, 4.8422639037274919], [-3.0044937138520034,
-4.1626528668210083, 0.20385989364778467, -0.016309737359709864]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[5.3999710427218375, -5.7380247131583317, 1.429591781072006, 0.48248980774350869],
[-1.8124531374926791, 1.0069909020512595, -2.8676198273255622, 0.66057817071492542], [-0.19462220463233715,
2.1503488315413657, -3.7728880309971533, -7.0760413443244214]], [[-3.1654627560292248, -8.0124152876348624,
-0.35745977102064419, -7.4847708524721295], [-2.1440851296661037, 0.62273906827229819, 0.44352872594573256,
-3.2882951990804195], [4.8808535491499772, 4.6584888171522136, -1.0445017806153212, 4.1537307445234566]]],
[[[8.0323656047755989, 3.4870317731597398, -2.60153176593737, 2.6527560709207805], [-0.12723783260451693,
-0.1530919093467773, 3.5580758796249192, 7.6479198219061768], [-1.7829853936966868, -4.1394839808914297,
-4.1157967374648168, 0.19566037971172889]], [[-4.171669527698878, -3.154882083312518, 1.6137181305318382,
-1.1433906089982564], [-7.8842986125375436, -3.6878372068320373, -7.2209398664275035, -1.7110657768657243],
[-1.1183922572346816, 0.33801180540971032, 1.9517213057882516, 4.0216188489484326]]], [[[-0.83644074710829575,
1.0687597141532175, 0.26921963165964558, 6.4565817309881748], [2.8451715991870179, -2.6980677964844064,
-5.4904279602975734, 0.53647255752224421], [-0.28287456373968478, -0.10824202760876744, -0.3273998712762145,
-5.808931044491402]], [[2.5726259251852168, -5.9582154419057956, -2.474765195226313, 0.62352450253015235],
[-1.8132358856166215, 8.7355605281725559, -0.61867410769446884, 9.833057662793582], [1.2774974768841094,
0.79885024564152385, 3.0003451326957951, 0.013337156642272419]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(3.50668349593,self.functionspace)
arg0.setTaggedValue(1,-3.09146650776)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-4.32369560802)
sub=res.substitute({arg1:s1})
ref=Data(-0.81701211209,self.functionspace)
ref.setTaggedValue(1,-7.41516211578)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(3.83444600418,self.functionspace)
arg0.setTaggedValue(1,-0.266863397142)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([3.6938635924807581, -2.3199399928130826])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.5283095966592981, 1.5145060113654574]),self.functionspace)
ref.setTaggedValue(1,numpy.array([3.4270001953384694, -2.5868033899553713]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(-2.85642807584,self.functionspace)
arg0.setTaggedValue(1,-0.357260114938)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[4.4124412590911621, 1.732298167196193, 1.8228166076040306, -3.9853565905277355,
3.3793508288079881], [-1.5339512663354116, -2.8915144317379058, -3.6493591659102464, 1.4243106283527815,
-0.6931246781623841], [4.7714119110273394, 0.45700055229079606, 1.2539528503924027, -1.4029360809413403,
2.8915917074007416], [4.2546657221847255, 3.2639891865967527, -0.4712967898993945, -3.9077971138749112,
-3.5655383189938084]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.5560131832472779, -1.1241299086476912, -1.0336114682398536, -6.8417846663716197,
0.52292275296410384], [-4.3903793421792958, -5.74794250758179, -6.5057872417541311, -1.4321174474911027,
-3.5495527540062684], [1.9149838351834552, -2.3994275235530882, -1.6024752254514816, -4.2593641567852245,
0.035163631556857311], [1.3982376463408412, 0.40756111075286849, -3.3277248657432787, -6.7642251897187951,
-6.4219663948376926]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[4.0551811441529519, 1.3750380522579828, 1.4655564926658204,
-4.3426167054659457, 3.0220907138697779], [-1.8912113812736218, -3.248774546676116, -4.0066192808484562,
1.0670505134145714, -1.0503847931005943], [4.4141517960891292, 0.099740437352585865, 0.89669273545419248,
-1.7601961958795505, 2.5343315924625314], [3.8974056072465153, 2.9067290716585426, -0.82855690483760469,
-4.2650572288131219, -3.9227984339320185]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(-2.98759917871,self.functionspace)
arg0.setTaggedValue(1,-4.26584239637)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[0.65736935684204045, 1.4685807994312459], [0.99740155640158257, -2.8001282911414127]],
[[-0.80947613326718226, -4.0270117786915378], [1.1564198209626229, -4.917538904347448]], [[-1.0488230155998202,
4.0958534641909754], [-4.9502522108275002, -0.19486641488505008]], [[-4.507307254914509, -0.98539101308887389],
[-4.5909807035957675, 2.4265853650826985]], [[-4.252924691613126, 0.42394291278212481], [3.4198717705842103,
-4.6000003047031024]], [[4.9609535782609235, 3.1625779529060711], [0.26834958946896492, 3.0941570460788874]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-2.3302298218695272, -1.5190183792803218], [-1.9901976223099851, -5.7877274698529799]],
[[-3.7970753119787499, -7.0146109574031055], [-1.8311793577489448, -7.9051380830590157]], [[-4.0364221943113883,
1.1082542854794077], [-7.9378513895390679, -3.1824655935966177]], [[-7.4949064336260767, -3.9729901918004416],
[-7.5785798823073351, -0.56101381362886915]], [[-7.2405238703246937, -2.5636562659294428], [0.43227259187264266,
-7.5875994834146701]], [[1.9733543995493559, 0.17497877419450347], [-2.7192495892426027,
0.10655786736731976]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.6084730395261495, -2.7972615969369441], [-3.2684408399666074,
-7.0659706875096031]], [[-5.0753185296353722, -8.2928541750597269], [-3.1094225754055671, -9.183381300715638]],
[[-5.3146654119680097, -0.16998893217721456], [-9.2160946071956893, -4.46070881125324]], [[-8.773149651282699,
-5.2512334094570638], [-8.8568230999639574, -1.8392570312854915]], [[-8.5187670879813169, -3.8418994835860651],
[-0.84597062578397964, -8.8658427010712924]], [[0.69511118189273358, -1.1032644434621188], [-3.997492806899225,
-1.1716853502893025]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-3.36894529378,self.functionspace)
arg0.setTaggedValue(1,-4.62956527999)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[-4.6824549992604805, 0.17860523484039881, -3.9939994980255102, -0.36579022311332743],
[-2.0003582573358858, 3.3436256968249793, -1.5671485178714373, 3.9554829351801821], [4.0499415739210693,
-3.1796189569360358, 0.28181611699077536, 1.4851321313182684]], [[4.9608073066477267, 2.1353944107091136,
3.2103965744924743, 0.36273874746876089], [0.33193515801312934, -1.8768462949087295, -3.5968753845201462,
-1.9342255010038101], [-0.98845968068423407, -2.6505467151645048, -3.9269883741621214, -1.2671783073823359]]],
[[[4.0296290320262234, 0.094183089334959114, -1.6548527114390654, 1.1815006848827636], [4.4205350333429578,
1.0602877007979998, -2.7207610093848364, 2.5749353581909009], [2.368743673752042, 0.36879117257479166,
3.1294699111463196, 3.8766421343643209]], [[-4.2994052301352443, -4.4665347726615128, -4.9654257982784813,
1.4010627781386145], [-0.49010647980719568, 1.1149343027340697, 3.8533389980231654, -1.4762647122950145],
[-2.4078638813490985, 4.4431147205208923, 3.0392301612263246, -2.3032611338556377]]], [[[1.1388924488325571,
4.4978561941078308, -3.3123851704811691, 1.3453478111463726], [4.1779635175178385, 3.1786527767023234,
-2.8109803623964669, 4.7217176158252876], [0.26914741902392958, -1.6630169842885789, -3.6267544687045641,
-4.7016327677304943]], [[0.44478691577550755, 2.9451130426961889, -1.0836274217802466, -4.8754431681482586],
[1.6457024072282014, -1.106310648992209, -3.2732924796145912, 4.7940609535301668], [-4.2482158844391957,
2.2391243759174451, 4.6408645091714327, 4.1449515947243611]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-8.0514002930449351, -3.1903400589440558, -7.3629447918099649, -3.734735516897782],
[-5.3693035511203409, -0.025319596959475277, -4.9360938116558923, 0.5865376413957275], [0.68099628013661473,
-6.5485642507204904, -3.0871291767936793, -1.8838131624661862]], [[1.5918620128632721, -1.233550883075341,
-0.15854871929198033, -3.0062065463156937], [-3.0370101357713253, -5.2457915886931836, -6.9658206783046008,
-5.3031707947882651], [-4.3574049744686887, -6.0194920089489594, -7.2959336679465761, -4.6361236011667906]]],
[[[0.66068373824176874, -3.2747622044494955, -5.0237980052235205, -2.187444608901691], [1.0515897395585032,
-2.3086575929864548, -6.0897063031692911, -0.79400993559355371], [-1.0002016200324126, -3.000154121209663,
-0.23947538263813506, 0.5076968405798663]], [[-7.668350523919699, -7.8354800664459674, -8.3343710920629359,
-1.9678825156458402], [-3.8590517735916503, -2.2540109910503849, 0.48439370423871075, -4.8452100060794692],
[-5.7768091751335531, 1.0741694267364377, -0.32971513255813001, -5.6722064276400923]]], [[[-2.2300528449518975,
1.1289109003233762, -6.6813304642656242, -2.023597482638082], [0.80901822373338383, -0.19029251708213124,
-6.1799256561809219, 1.352772322040833], [-3.099797874760525, -5.0319622780730331, -6.9956997624890187,
-8.0705780615149489]], [[-2.9241583780089471, -0.42383225108826572, -4.4525727155647008, -8.2443884619327132],
[-1.7232428865562532, -4.4752559427766636, -6.6422377733990459, 1.4251156597457122], [-7.6171611782236504,
-1.1298209178670096, 1.2719192153869781, 0.77600630093990652]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-9.3120202792456048, -4.4509600451447255, -8.6235647780106355,
-4.9953555030984518], [-6.6299235373210106, -1.285939583160145, -6.1967137978565621, -0.67408234480494222],
[-0.579623706064055, -7.8091842369211601, -4.347749162994349, -3.144433148666856]], [[0.33124202666260238,
-2.4941708692760107, -1.4191687054926501, -4.2668265325163635], [-4.297630121971995, -6.5064115748938534,
-8.2264406645052706, -6.5637907809889349], [-5.6180249606693584, -7.2801119951496291, -8.5565536541472458,
-5.8967435873674603]]], [[[-0.59993624795890099, -4.5353821906501652, -6.2844179914241902, -3.4480645951023607],
[-0.20903024664216652, -3.5692775791871245, -7.3503262893699608, -2.0546299217942234], [-2.2608216062330824,
-4.2607741074103327, -1.5000953688388048, -0.75292314562080342]], [[-8.9289705101203687, -9.0961000526466371,
-9.5949910782636056, -3.2285025018465099], [-5.11967175979232, -3.5146309772510547, -0.77622628196195897,
-6.1058299922801389], [-7.0374291613342228, -0.18645055946423206, -1.5903351187587997, -6.932826413840762]]],
[[[-3.4906728311525672, -0.13170908587729357, -7.9419504504662939, -3.2842174688387518], [-0.45160176246728589,
-1.450912503282801, -7.4405456423815917, 0.092152335840163246], [-4.3604178609611948, -6.2925822642737028,
-8.2563197486896875, -9.3311980477156187]], [[-4.1847783642096168, -1.6844522372889355, -5.7131927017653705,
-9.505008448133383], [-2.983862872756923, -5.7358759289773333, -7.9028577595997156, 0.16449567354504246],
[-8.8777811644243201, -2.3904409040676793, 0.011299229186308324, -0.48461368526076321]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([-4.9434811071655114, 1.7588416724781917]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([3.0524482361043965, -0.58828792238396233]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-4.86003727467)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-9.8035183818403411, -3.1011956021966389]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-1.8075890385704341, -5.4483251970587929]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([0.47124983588436109, 3.3842142103059487]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([4.4506172428158504, -1.5976912605342894]))
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([2.7380372395241483, -1.2414970456241372])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([3.2092870754085094, 2.1427171646818115]),self.functionspace)
ref.setTaggedValue(1,numpy.array([7.1886544823399987, -2.8391883061584267]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[3.7123556177495072, -1.2322724929891438, -4.3196981967098704, 4.5149190397092358,
-3.4294461596271342], [-0.32526237821140569, 4.906418518064358, 1.6782843293160443, -4.5452294423093242,
-3.4252951962126454], [4.7623389482797158, 4.8957853100883888, 2.4605965522735644, -3.3235939770772349,
-3.6622677868193731], [3.7849671492059009, -3.7965523255405484, -0.98706292680421903, -2.9575953641431996,
3.7235194699440495]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[3.846235478086534, -2.9152984736534773, 2.1299170235868692,
1.4194093106373815, -1.9728564928751369], [0.12730504885223404, -2.4537968289763077, 1.8352652361138375,
-1.1054616749639532, -0.67553225283567997], [-4.6542627767136047, 0.014905560429250286, 0.84138572626791408,
-1.4074784720342515, -3.3322631066777983], [-0.64893500421415951, 4.4524265176475826, -3.5204114624144456,
3.5239615703390363, 2.3718443568961201]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.4845259086)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[7.1968815263516515, 2.2522534156130005, -0.83517228810772615, 7.9994449483113801,
0.055079748975010112], [3.1592635303907386, 8.3909444266665023, 5.1628102379181886, -1.06070353370718,
0.059230712389498841], [8.2468648568818601, 8.3803112186905331, 5.9451224608757087, 0.16093193152490937,
-0.17774187821722887], [7.2694930578080452, -0.31202641693840416, 2.4974629817979253, 0.52693054445894472,
7.2080453785461938]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[7.3307613866886783, 0.56922743494866701, 5.6144429321890135,
4.9039352192395258, 1.5116694157270074], [3.6118309574543783, 1.0307290796258366, 5.3197911447159818,
2.3790642336381911, 2.8089936557664643], [-1.1697368681114604, 3.4994314690313946, 4.3259116348700584,
2.0770474365678928, 0.15226280192434594], [2.8355909043879848, 7.9369524262497269, -0.035885553812301296,
7.0084874789411806, 5.8563702654982643]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[2.7675952117994296, 0.98431175880226363, -1.8309000840442566, 2.0351166910383416,
2.1718600084175153], [0.64718493825654111, 3.0274641310077364, 4.6031246235215555, -0.072830522019846633,
-3.436466903373192], [-2.7989895712459734, 3.2804563231391093, 3.1416998470123456, 0.25702028842752966,
-3.1553411419958821], [-4.5620989116806543, -0.23300222673645532, -2.3978689464069101, 0.41391436589174457,
-3.7252639362836382]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[-2.1509506437818238, -2.5007519800405218, 0.30616207266744233,
-0.46790716227581797, 0.6454558125610621], [1.9589653025955753, -4.9059174981425437, -4.7107956989445992,
2.6150016745692826, -3.3329567586885211], [1.1850451086308738, 3.8781029980110997, -4.7104324292639133,
-4.8362413881812492, 4.9066980390674555], [-1.2440311634968171, -1.6429522113717008, 4.0547225056117124,
-0.33314796054153195, -2.6143781039708855]]))
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-0.0104190624259477, 3.439083370835446, -1.7585221913131677, 3.8784501968475897,
0.08088556648108991], [0.53276272310770789, -1.3171951284400176, -0.841014288686317, 2.4350359443944622,
0.55796159262639922], [-3.3985580423616479, 0.73804937880111687, 0.84641655693241269, -2.0376479444757822,
-0.094456394031885438], [0.8829252865168975, 0.84170422580042903, -1.9539396350167637, -4.8054718599517194,
-0.37594711864698205]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[2.7571761493734819, 4.4233951296377096, -3.5894222753574243, 5.9135668878859313,
2.2527455748986052], [1.179947661364249, 1.7102690025677187, 3.7621103348352385, 2.3622054223746156,
-2.8785053107467928], [-6.1975476136076217, 4.0185057019402262, 3.9881164039447583, -1.7806276560482526,
-3.2497975360277676], [-3.6791736251637568, 0.60870199906397371, -4.3518085814236738, -4.3915574940599749,
-4.1012110549306202]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-2.1613697062077715, 0.93833139079492422, -1.4523601186457253,
3.4105430345717718, 0.72634137904215201], [2.4917280257032832, -6.2231126265825614, -5.5518099876309162,
5.0500376189637448, -2.7749951660621219], [-2.2135129337307742, 4.6161523768122166, -3.8640158723315006,
-6.8738893326570309, 4.8122416450355701], [-0.36110587697991958, -0.80124798557127175, 2.1007828705949487,
-5.1386198204932514, -2.9903252226178676]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[1.6094791339338048, 4.27222307751477], [4.9486531857239697, -4.5552975586923292]],
[[-0.12032729123703056, -4.1413061177629231], [-2.7473350985925316, 4.7319188820310991]], [[0.13107637034429231,
-3.2138415379490204], [-3.9942457581718696, 1.3262496008026838]], [[2.56850905863657, 1.8321753808437329],
[4.5176482730823331, 4.4664637318837137]], [[0.50860355331966556, 0.55279434819439199], [3.1688695988617859,
-2.6740526298455016]], [[4.4977965557520072, 3.6422271944652209], [3.7948343945899445,
-3.0377990068633332]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[-2.9548694146760557, 3.1101651017467038], [-0.31006440672923752,
0.74616091042484989]], [[-3.1016477433464864, 2.9532816390640111], [-2.0494474684559894, -1.1448583599993354]],
[[4.2052724347365604, -1.8157003708847643], [4.8073133555422327, -2.7045312989764492]], [[-2.3803833325202763,
0.19928505008920272], [-2.8622812030202094, 3.9488692362256081]], [[-4.1266217915470236, 4.8461083576413735],
[-3.1895474177762351, 4.4625154514412237]], [[-0.65350755924337811, 2.8015786665738105], [0.94103003425367859,
0.27556367440023166]]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.49324308458)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[6.1027222185118468, 8.765466162092812], [9.4418962703020117, -0.062054474114287217]],
[[4.3729157933410114, 0.35193696681511888], [1.7459079859855104, 9.2251619666091411]], [[4.6243194549223343,
1.2794015466290216], [0.49899732640617245, 5.8194926853807258]], [[7.061752143214612, 6.3254184654217749],
[9.0108913576603751, 8.9597068164617557]], [[5.0018466378977076, 5.046037432772434], [7.6621126834398279,
1.8191904547325404]], [[8.9910396403300492, 8.1354702790432629], [8.2880774791679865,
1.4554440777147089]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.5383736699019863, 7.6034081863247458], [4.1831786778488045,
5.2394039950028919]], [[1.3915953412315556, 7.4465247236420531], [2.4437956161220526, 3.3483847245787066]],
[[8.6985155193146024, 2.6775427136932777], [9.3005564401202747, 1.7887117856015928]], [[2.1128597520577657,
4.6925281346672447], [1.6309618815578326, 8.4421123208036501]], [[0.36662129303101842, 9.3393514422194155],
[1.3036956668018069, 8.9557585360192657]], [[3.8397355253346639, 7.2948217511518525], [5.4342731188317206,
4.7688067589782737]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-2.7345315461324993, 4.5316724428402377], [-1.2207000383039999, -2.1651454481686692]],
[[-2.5222456135735638, 3.1325113872519896], [0.54140311786327011, -1.6266115642059011]], [[4.3999274072752783,
-0.64510581732829841], [-3.3878893926233533, -0.14783111107246061]], [[2.4816188811184228, 1.505965932327137],
[-2.8128544405052458, 3.2460332510852936]], [[1.5649806120186849, 1.1768584297160487], [-3.3133262672401544,
-2.5740884272652789]], [[2.936076596237732, -0.80694051724477056], [1.6382059835800931,
-0.059174653042079584]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[4.107948776768561, 4.79459166600315], [-0.070211802843057391,
-2.3000592273671394]], [[1.53142006950028, 0.5983353676488381], [4.2000369856633419, -3.7326077043834074]],
[[-3.6852528003303684, -0.40061815593309014], [4.849947657932514, 3.2046322763443698]], [[4.6824735127774275,
-2.3356975272114679], [-1.4284737023138216, -0.96863966970867921]], [[4.4306883649430571, 0.16250464015770305],
[4.7866411719098583, -1.6949698779239197]], [[-4.9624929004021014, -0.4120760567738655], [-3.510925072784119,
-0.26388846668772636]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[3.7560333190798687, 0.63030183757017788], [-3.8821224320935288, 4.3508142113739634]],
[[4.3548667192676795, -3.4709315123037445], [-0.19540447292770935, -1.1720138856956916]], [[3.7993994701980398,
-4.5475458462287497], [-0.20650310401114513, -2.7802894344079201]], [[-0.46867874332271242, 0.82685022383334505],
[-3.5357776147305264, 0.7633420403065605]], [[-0.19578164461526359, -4.1370261640670458], [-1.2073883253186946,
0.74664652191646397]], [[-0.697880661399644, -0.46932885527321488], [2.4087818009804716, -1.8245102799854829]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[1.0215017729473694, 5.1619742804104156], [-5.1028224703975287, 2.1856687632052942]],
[[1.8326211056941157, -0.33842012505175489], [0.34599864493556076, -2.7986254499015928]], [[8.1993268774733181,
-5.1926516635570481], [-3.5943924966344984, -2.9281205454803807]], [[2.0129401377957103, 2.3328161561604821],
[-6.3486320552357718, 4.0093752913918541]], [[1.3691989674034213, -2.9601677343509971], [-4.520714592558849,
-1.8274419053488149]], [[2.238195934838088, -1.2762693725179854], [4.0469877845605646,
-1.8836849330275625]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[7.8639820958484297, 5.4248935035733279], [-3.9523342349365862,
2.050754984006824]], [[5.8862867887679595, -2.8725961446549064], [4.0046325127356326, -4.904621590079099]],
[[0.11414666986767141, -4.9481640021618398], [4.6434445539213689, 0.42434284193644967]], [[4.2137947694547151,
-1.5088473033781229], [-4.9642513170443481, -0.20529762940211871]], [[4.2349067203277935, -3.9745215239093428],
[3.5792528465911637, -0.94832335600745576]], [[-5.6603735618017454, -0.88140491204708038], [-1.1021432718036475,
-2.0883987466732092]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[1.2403285479679145, -0.65355746314869823, 0.23507371305026048, 2.9495208917061202],
[-4.4153187452600653, -1.0271324152128747, 3.6087985228033794, 1.587633224392107], [1.5882989512534262,
-2.3766989521547401, -4.6462509853387939, 1.1425676014861166]], [[-4.8469447836806694, -1.4338245370809863,
-4.8441809139347694, 0.082128480181090424], [4.2695412477206585, -2.0376229192188622, -2.685821131586259,
-4.5654361329152717], [3.5226403567783482, -4.9633770210253347, 4.1637469549065127, -3.5898874968684167]]],
[[[2.7439089503129228, 0.81346375693975492, -2.576882111469688, 4.758878084101946], [0.098363354586225249,
-4.314913184354209, -1.1821682575010484, 4.9687115939178916], [-2.5414207769554564, 1.9836872846103208,
-1.5982744174212127, 4.5509211096426121]], [[4.759533396882766, -4.550347299113696, 4.9394743649799153,
-3.9692445921595421], [1.5755016838325195, 2.6599597206311305, -0.59545966103916648, -1.308464088815966],
[1.7018715016873482, 0.31781368103450536, -0.91184792887657995, -0.60566457689943931]]], [[[-0.365764084374395,
-0.75878286483821444, -3.1104661623240091, -3.7302303444372109], [0.58052395594970907, 0.14085590954626337,
4.6712439745076182, 0.65991412045590181], [-4.5675491076195733, -3.3042112830144132, -2.6719400309110553,
-3.8520603991598765]], [[3.4260488825099618, -1.2789319515430164, 1.8435112511824903, 1.0773214658952854],
[-4.0772283149901236, 1.0211433275718873, -2.015430043082814, 0.1376630245430368], [1.3249956905172624,
3.1987247807146968, 1.0304156332749459, 3.785256475561086]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.8774766185796605, 3.1521364883779448, -4.9233158714840091,
3.7988193665209522], [4.8244393256113263, 2.4688683468745563, -4.5044275072582254, 1.1107496985072052],
[-2.9980383766650376, -4.2922660982517158, 3.4924104659712771, -0.5135964311738892]], [[1.9573144047865201,
-2.2686101409008961, -2.907052414660404, -4.0582253229051144], [-2.0281877168409657, 1.7867206317317663,
0.018511114285918673, -4.0475974398672498], [1.3023403490307315, 1.9932255873687215, -4.6698465653310688,
-4.5630845029599421]]], [[[-1.9525649263627876, -0.72040110769848908, -3.6987029249472769, -3.3184217891099999],
[-4.0519149413902857, 4.1195877398536549, -3.8261874289376463, 3.423780007792768], [0.11768639970294359,
-1.4898880703788131, -1.1746648112150213, -0.28493737967147226]], [[-2.0138403307539932, 3.9987186392010816,
-1.0125535260055338, 0.57376641241565363], [4.213727608092972, 0.51388058678005066, -4.4106027756910908,
-1.9979423050108283], [1.5708368447511347, -1.6270284297780933, -0.55277364435139376, -1.7748804647831715]]],
[[[2.7639070541103061, 2.7303808332951629, 0.41148416591473591, -1.9337000414572802], [-2.7585163378482456,
2.2319457297797207, 3.7988668025967804, 3.6103374331669471], [-4.5925114196923271, -2.1274746711435997,
3.3094547630756779, -4.1386856959210352]], [[-2.1348423629137692, 3.539794593057783, 4.8265405725541157,
4.9426398297282788], [4.5757071915543417, -4.0433372993763399, -0.84096548582416997, 2.0567811910343226],
[4.5367596882428671, -4.9139510999364404, 1.1342166543217944, 1.4859311895053571]]]]))
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(4.83582066753)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[6.0761492154946453, 4.1822632043780326, 5.0708943805769913, 7.785341559232851],
[0.4205019222666655, 3.8086882523138561, 8.4446191903301102, 6.4234538919188378], [6.424119618780157,
2.4591217153719906, 0.18956968218793691, 5.9783882690128474]], [[-0.011124116153938601, 3.4019961304457444,
-0.008360246408038563, 4.9179491477078212], [9.1053619152473892, 2.7981977483078686, 2.1499995359404718,
0.27038453461145906], [8.358461024305079, -0.12755635349860395, 8.9995676224332435, 1.2459331706583141]]],
[[[7.5797296178396536, 5.6492844244664857, 2.2589385560570427, 9.5946987516286768], [4.934184022112956,
0.52090748317252178, 3.6536524100256824, 9.8045322614446224], [2.2943998905712744, 6.8195079521370516,
3.2375462501055181, 9.3867417771693429]], [[9.5953540644094968, 0.28547336841303483, 9.7752950325066461,
0.86657607536718873], [6.4113223513592503, 7.4957803881578613, 4.2403610064875643, 3.5273565787107648],
[6.537692169214079, 5.1536343485612361, 3.9239727386501508, 4.2301560906272915]]], [[[4.4700565831523358,
4.0770378026885163, 1.7253545052027217, 1.1055903230895199], [5.4163446234764399, 4.9766765770729942,
9.507064642034349, 5.4957347879826326], [0.26827155990715745, 1.5316093845123175, 2.1638806366156755,
0.98376026836685426]], [[8.2618695500366925, 3.5568887159837144, 6.679331918709221, 5.9131421334220162],
[0.75859235253660717, 5.8569639950986181, 2.8203906244439167, 4.9734836920697676], [6.1608163580439932,
8.0345454482414276, 5.8662363008016767, 8.6210771430878168]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.95834404894707026, 7.9879571559046756, -0.087495203957278278,
8.634640034047683], [9.6602599931380571, 7.304689014401287, 0.3313931602685054, 5.946570366033936],
[1.8377822908616932, 0.543554569275015, 8.3282311334980079, 4.3222242363528416]], [[6.7931350723132509,
2.5672105266258347, 1.9287682528663268, 0.77759534462161639], [2.8076329506857651, 6.6225412992584971,
4.8543317818126495, 0.78822322765948094], [6.1381610165574623, 6.8290462548954523, 0.165974102195662,
0.27273616456678873]]], [[[2.8832557411639432, 4.1154195598282417, 1.1371177425794539, 1.5173988784167309],
[0.78390572613644505, 8.9554084073803857, 1.0096332385890845, 8.2596006753194988], [4.9535070672296744,
3.3459325971479177, 3.6611558563117095, 4.5508832878552585]], [[2.8219803367727376, 8.8345393067278124,
3.823267141521197, 5.4095870799423844], [9.0495482756197028, 5.3497012543067815, 0.42521789183563996,
2.8378783625159025], [6.4066575122778655, 3.2087922377486375, 4.283047023175337, 3.0609402027435593]]],
[[[7.5997277216370369, 7.5662015008218937, 5.2473048334414667, 2.9021206260694505], [2.0773043296784852,
7.0677663973064515, 8.6346874701235112, 8.4461581006936779], [0.24330924783440366, 2.7083459963831311,
8.1452754306024087, 0.6971349716056956]], [[2.7009783046129616, 8.3756152605845138, 9.6623612400808465,
9.7784604972550095], [9.4115278590810725, 0.79248336815039089, 3.9948551817025608, 6.8926018585610533],
[9.3725803557695979, -0.078130432409709627, 5.9700373218485252, 6.3217518570320879]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
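   # Every test in this block follows the same shape: build a concrete Data
   # object, combine it with a placeholder Symbol via the overloaded
   # operator, then bind the symbol and compare against a precomputed
   # reference. A minimal sketch of the pattern (the values here are
   # illustrative only, not taken from any particular test):
   #
   #   arg0 = Data(numpy.zeros((2,)), self.functionspace)  # concrete data
   #   arg1 = Symbol(shape=(2,))                           # symbolic placeholder
   #   res = arg0 + arg1                                   # overloaded op -> Symbol
   #   sub = res.substitute({arg1: numpy.ones((2,))})      # bind the symbol
   #   # sub should agree with the reference to within self.RES_TOL
   #
   # For the taggedData cases, setTaggedValue(1, ...) attaches a second
   # array to both the argument and the reference, so the substitution is
   # checked for the default and the tagged sample alike.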
def test_add_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[-3.1509236523814286, 1.680234058442708, -1.7187977550532416, 3.9846453843972913],
[-1.6754979614332322, -3.8450074807346901, -1.5740330789137689, -4.4201074343218751], [2.276529915966389,
-0.80235747833916982, 4.571247045598767, -3.4093255486695617]], [[-4.0166628667791446, -1.3933240066153738,
-1.215071574667598, -3.4706735067142258], [-3.0960303329082572, 4.3009033191704589, 4.4065883064621634,
4.8965445768019009], [-4.4443460968929758, 3.8975314333052253, -4.4153045047286144, 1.7496820405056166]]],
[[[1.634274247051799, -2.4623052709302771, 1.4279180811059975, 0.92544783745377668], [-4.4862942162658106,
-0.17080151547727951, 0.52532922395695625, -0.11419327223481623], [-1.1603038628614835, -2.5757515035829472,
1.9959550719114718, -1.7953240768392242]], [[4.9309159450812103, 3.2298165897638906, -0.075208625571880461,
-1.1899071115534432], [1.6545058865005409, -1.9426363189361773, 1.620629502101667, -4.2257681218133687],
[-0.24689686416986767, 2.1247379677905815, -0.022501917990521925, -1.9988138278359822]]], [[[-2.16170138942825,
1.2184335532362125, 1.1509535832826323, 2.2195238124001797], [2.7455643566460015, 4.6453581322389361,
-4.1082447076462643, -4.0639146315693067], [-4.96116105494092, -3.6915142795866762, -1.2186796693827917,
4.7933913234222967]], [[2.0022553772723217, -0.96891528014022654, -2.5457411370843142, -3.3574915783043058],
[0.10326637441549735, 2.2065594442944327, 3.4159550457557479, -0.71182719653128945], [-1.5473005591196651,
-1.8237704422942014, 3.7660184612895105, -2.1565964302540372]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-2.7644812297675436, 0.40931971763703956, 3.611075059192606,
0.50972765741910564], [-4.2130726841282584, -1.1190277433669751, -0.71203745760782766, -3.152956525368753],
[-1.6186056313723087, 1.1274726343098616, 4.4133392834898437, 1.5220424195160689]], [[0.16147933294385375,
2.4654462130650998, -2.2315133839410328, -4.5248215067907562], [2.2226933853289026, 3.7083490689582508,
1.6042940030913613, 0.26178935291219929], [2.4033332562872989, 2.6116613010273229, -3.5340848426974594,
-4.3871506552920767]]], [[[-2.5011422414749243, -2.9785737952530678, -4.0632268435384287, -2.9061747268645899],
[-3.4361922491984487, 0.92512310228203631, -3.7591410062368915, -0.10199113857196274], [1.4370716393838645,
0.71874746237537668, -4.5480615526025323, -3.9385610102938093]], [[-3.5039474073115562, 1.4740925776889409,
-0.06403798877318323, -3.3828440686373753], [-1.9590119108809123, -0.13446729158123816, -2.4360152863347251,
0.81375486060557112], [2.4638296949211451, 0.84554464160795018, 1.0770605717668191, 0.90311465710515648]]],
[[[-3.0365259446312756, -2.1113062138954444, 3.190598106141481, 4.7146234105400531], [4.7073713389281071,
2.0949812753843036, 1.902801485931489, -0.4384294077249864], [-4.4341512258710214, 4.114619941421422,
4.1663347911930675, -0.082374028629738305]], [[-0.58950965471106098, -1.9744112566224792, -0.0098348725084971278,
2.3871548847218813], [-1.1861224380121662, -3.8703032573387253, 0.2332725218101972, 2.7881117501797101],
[-4.3313677243610327, 2.5428749523942127, 3.9018944633638419, -0.49408732338659789]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[1.8433628252117984, 1.5322432245117268, 0.55363793461945665, 4.6657626927783653],
[-0.94710403494804751, 3.9800168829397649, 3.0366988370600794, 2.8875431155604332], [-1.188024345098996,
1.0665386751463011, 4.7835901054797993, 2.5969696632689807]], [[-1.99850752062535, 1.1333681341555639,
-0.49718999089842697, 1.1440753369804515], [0.26294280812698378, -3.8684363170040701, 0.061030108864615684,
-4.1179127492349608], [-4.67031644465197, 4.9054510497550492, -0.2640662442281041, 1.363134852748785]]],
[[[-1.4621905107325697, -2.8811881835070574, -2.0127263016810106, 3.9187151372775499], [4.0559843147336121,
3.8748150284806506, -4.7195991819934049, 1.6441241199343715], [1.1018797372155733, 1.5720711461020827,
-2.8718182782954003, -2.4926472889456743]], [[2.1583981297206112, -2.7029142786449709, -4.0306810999276212,
-0.041927417439557857], [2.5297094316362001, 3.2023688131127575, -0.87830172094753056, 1.5087811969314782],
[0.94040146920827272, 1.8042467131134678, 2.6306472495122346, 0.16819275341523543]]], [[[0.15798239523545377,
2.4104584738150319, 2.3850248364278386, 3.2174938931658534], [4.8575582926065533, 0.30772922316230389,
-4.4397211951638047, 0.39063821497748741], [-2.3146321369181688, -3.0703095447217885, 1.7397877979741549,
4.033153568325778]], [[-1.7935270727714037, -3.9682025038313595, -3.4065483616803141, 2.1844510922893523],
[-4.2449404804537032, 1.9572337718531996, -4.6593011375931308, 0.98236210083608633], [4.8624542464851288,
0.5657266529616205, 0.50114562982511135, -3.2736237576584317]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-1.3075608271696302, 3.2124772829544348, -1.165159820433785, 8.6504080771756566],
[-2.6226019963812797, 0.13500940220507474, 1.4626657581463105, -1.5325643187614419], [1.088505570867393,
0.26418119680713126, 9.3548371510785664, -0.81235588540058101]], [[-6.0151703874044946, -0.25995587245980989,
-1.7122615655660249, -2.3265981697337743], [-2.8330875247812735, 0.43246700216638878, 4.4676184153267791,
0.77863182756694016], [-9.1146625415449449, 8.8029824830602745, -4.6793707489567185, 3.1128168932544016]]],
[[[0.17208373631922935, -5.3434934544373345, -0.58480822057501314, 4.8441629747313266], [-0.4303099015321985,
3.7040135130033711, -4.1942699580364486, 1.5299308476995552], [-0.058424125645910152, -1.0036803574808646,
-0.87586320638392845, -4.2879713657848981]], [[7.0893140748018215, 0.52690231111891972, -4.1058897254995017,
-1.2318345289930011], [4.184215318136741, 1.2597324941765802, 0.74232778115413645, -2.7169869248818905],
[0.69350460503840505, 3.9289846809040494, 2.6081453315217127, -1.8306210744207467]]], [[[-2.0037189941927962,
3.6288920270512444, 3.5359784197104709, 5.4370177055660331], [7.6031226492525548, 4.95308735540124,
-8.5479659028100698, -3.6732764165918192], [-7.2757931918590888, -6.7618238243084647, 0.52110812859136324,
8.8265448917480747]], [[0.20872830450091806, -4.9371177839715861, -5.9522894987646282, -1.1730404860149535],
[-4.1416741060382058, 4.1637932161476323, -1.2433460918373829, 0.27053490430479687], [3.3151536873654637,
-1.2580437893325809, 4.2671640911146218, -5.430220187912469]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.92111840455574523, 1.9415629421487663, 4.1647129938120626,
5.1754903501974709], [-5.1601767190763059, 2.8609891395727898, 2.3246613794522517, -0.26541340980831984],
[-2.8066299764713047, 2.1940113094561626, 9.1969293889696431, 4.1190120827850496]], [[-1.8370281876814962,
3.5988143472206637, -2.7287033748394598, -3.3807461698103047], [2.4856361934558864, -0.16008724804581931,
1.665324111955977, -3.8561233963227615], [-2.2669831883646712, 7.5171123507823721, -3.7981510869255635,
-3.0240158025432917]]], [[[-3.9633327522074939, -5.8597619787601252, -6.0759531452194393, 1.0125404104129601],
[0.61979206553516342, 4.7999381307626869, -8.4787401882302973, 1.5421329813624087], [2.5389513765994378,
2.2908186084774593, -7.4198798308979326, -6.4312082992394837]], [[-1.345549277590945, -1.22882170095603,
-4.0947190887008045, -3.4247714860769332], [0.57069752075528779, 3.0679015215315193, -3.3143170072822556,
2.3225360575370493], [3.4042311641294178, 2.649791354721418, 3.7077078212790537, 1.0713074105203919]]],
[[[-2.8785435493958218, 0.29915225991958749, 5.5756229425693196, 7.9321173037059065], [9.5649296315346604,
2.4027104985466075, -2.5369197092323157, -0.047791192747498989], [-6.7487833627891902, 1.0443103966996334,
5.9061225891672224, 3.9507795396960397]], [[-2.3830367274824646, -5.9426137604538383, -3.4163832341888112,
4.5716059770112336], [-5.4310629184658694, -1.9130694854855257, -4.4260286157829336, 3.7704738510157965],
[0.53108652212409613, 3.1086016053558332, 4.4030400931889533, -3.7677110810450296]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
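   # The expandedData cases below construct a spatially varying argument
   # from a whereNegative() mask on the function-space coordinates: arg0
   # takes one value where getX()[0] < 0.5 and another elsewhere. The
   # reference is assembled the same way from a whereZero() mask, so the
   # tolerance check covers both branches of the masked data.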
def test_add_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-0.481249850026)+(1.-msk_arg0)*(-1.48465416864)
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(-2.65110429185)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(-3.13235414188)+(1.-msk_ref)*(-4.13575846049)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(1.13411439983)+(1.-msk_arg0)*(-0.629637549331)
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([-0.62992419613163175, 4.55886114005793])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([0.50419020369403444, 5.6929755398835962])+(1.-msk_ref)*numpy.array([-1.259561745462479,
3.9292235907270827])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(3.01809294358)+(1.-msk_arg0)*(0.889743657807)
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-2.793178683106079, -2.6222774715493582, 1.0142792223620747, -3.0640922264732984,
-2.3554298671206055], [0.088775964219395043, 3.4441381957619619, 3.3892189758872853, 2.7423767697866088,
3.977644321141641], [1.4526982641352157, 2.2184052986969505, -3.952710218879385, -4.7169576073736375,
-0.7937042808225101], [2.2686916098744314, -1.553248315886353, -2.7367045745859819, 3.7958840729585344,
1.4548199443717298]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[0.22491426047411567, 0.39581547203083645, 4.0323721659422693, -0.045999282893103732,
0.66266307645958911], [3.1068689077995897, 6.4622311393421565, 6.4073119194674799, 5.7604697133668035,
6.9957372647218357], [4.4707912077154104, 5.2364982422771451, -0.93461727529919036, -1.6988646637934428,
2.2243886627576845], [5.2867845534546261, 1.4648446276938416, 0.28138836899421271, 6.813977016538729,
4.4729128879519244]])+(1.-msk_ref)*numpy.array([[-1.9034350252987218, -1.732533813742001, 1.9040228801694319,
-2.1743485686659412, -1.4656862093132483], [0.97851962202675224, 4.3338818535693191, 4.2789626336946425,
3.632120427593966, 4.8673879789489982], [2.3424419219425729, 3.1081489565043077, -3.0629665610720278,
-3.8272139495662802, 0.096039376984847102], [3.1584352676817886, -0.66350465807899583, -1.8469609167786247,
4.6856277307658916, 2.344563602179087]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-4.98444562132)+(1.-msk_arg0)*(4.30756765987)
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[1.9993822405268356, -3.1230808428690615], [4.9036400439562815, -4.8838867997176525]],
[[0.42763250705520939, 1.7579324334230453], [-3.7242679708963458, 1.8833596506298056]], [[-3.5481907533254931,
0.2040318933875751], [-2.5124574767604746, -4.1576503017979416]], [[2.4187154671810562, -0.51775884222858526],
[-1.722028671225063, 4.8177194310600537]], [[3.5460779618762999, 3.7426721831596925], [-3.14876579453641,
-1.8491069265603413]], [[-2.0602497125201733, 1.8445672729830882], [2.6289048953955998, -2.1171625740448654]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-2.9850633807947604, -8.1075264641906575], [-0.080805577365314463,
-9.8683324210392485]], [[-4.5568131142663866, -3.2265131878985507], [-8.7087135922179417, -3.1010859706917904]],
[[-8.5326363746470886, -4.7804137279340209], [-7.4969030980820701, -9.1420959231195376]], [[-2.5657301541405397,
-5.5022044635501812], [-6.7064742925466589, -0.16672619026154223]], [[-1.4383676594452961, -1.2417734381619034],
[-8.1332114158580069, -6.8335525478819372]], [[-7.0446953338417693, -3.1398783483385078], [-2.3555407259259962,
-7.1016081953664614]]])+(1.-msk_ref)*numpy.array([[[6.3069499004015404, 1.1844868170056433], [9.2112077038309863,
-0.57631913984294769]], [[4.7352001669299142, 6.0655000932977501], [0.58329968897835904, 6.1909273105045104]],
[[0.75937690654921175, 4.5115995532622799], [1.7951101831142302, 0.14991735807676321]], [[6.726283127055761,
3.7898088176461195], [2.5855389886496418, 9.1252870909347585]], [[7.8536456217510047, 8.0502398430343973],
[1.1588018653382948, 2.4584607333143635]], [[2.2473179473545315, 6.152134932857793], [6.9364725552703046,
2.1904050858298394]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank0_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-2.9697925334)+(1.-msk_arg0)*(-4.26135335725)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[3.9689996783063126, 2.6024749301521517, -2.8657897182202263, 3.4523361907793202],
[1.0646468808240472, 2.2809214673673006, 1.9110441510817342, 3.6637536830808415], [-4.8161620946685977,
1.1260192950202335, -1.5444099528131283, 4.5856953227320361]], [[3.4807853259935388, 1.0632821522370133,
-1.7813251042294, 0.96803702807832348], [-2.2395880868316476, 4.8919502166960243, 3.0915081953974273,
-0.85921425228962178], [-0.24500754865585961, -3.000069805276242, -2.3285433357124861, -3.7526812827715004]]],
[[[-2.6148866735769314, -2.9426881222754986, -2.1105189060422127, -1.718323686970705], [0.38236683235255065,
4.8146833101999391, -0.69724678041282662, -3.674837501299455], [-1.1217878757973345, 1.9457797122429064,
4.3330454272287042, 1.2870165165330079]], [[0.90390350707926448, 4.0932246664578322, 4.0170833493811937,
2.3057200276883218], [-4.1149618340720506, 4.3206785552080422, 4.5478406361616468, 3.4270491303459689],
[-3.2122582790653578, -0.051138136931458078, 2.847106348954056, -2.0922906343243097]]], [[[-3.8470709835005801,
0.79389346854249432, 1.9702586564654192, -1.230993932131331], [0.52027641197917784, 4.1606002966489264,
-4.1240899145057277, 3.0855602864655047], [1.2434749670286918, 1.9421106344042691, -4.7997149299258455,
-3.1016051858236517]], [[-4.0158867307020536, -1.2810983979769732, 4.1806447574751786, 2.4159993753375488],
[3.8210591526688589, 2.9170696329659753, 0.212629682453775, -3.6791629346607402], [-0.52709663403725493,
-2.0893727810689953, -1.7473644406170976, -4.1869442335699976]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[0.99920714490574225, -0.36731760324841867, -5.8355822516207967, 0.48254365737874982],
[-1.9051456525765231, -0.68887106603326975, -1.0587483823188362, 0.69396114968027112], [-7.7859546280691685,
-1.8437732383803369, -4.5142024862136987, 1.6159027893314657]], [[0.51099279259296848, -1.9065103811635571,
-4.7511176376299709, -2.0017555053222469], [-5.2093806202322179, 1.9221576832954539, 0.12171566199685691,
-3.8290067856901921], [-3.21480008205643, -5.9698623386768119, -5.2983358691130569, -6.7224738161720712]]],
[[[-5.5846792069775013, -5.9124806556760685, -5.0803114394427826, -4.6881162203712758], [-2.5874257010480197,
1.8448907767993687, -3.667039313813397, -6.6446300347000253], [-4.0915804091979044, -1.024012821157664,
1.3632528938281339, -1.6827760168675625]], [[-2.0658890263213059, 1.1234321330572619, 1.0472908159806233,
-0.66407250571224852], [-7.0847543674726214, 1.3508860218074719, 1.5780481027610764, 0.45725659694539855],
[-6.1820508124659277, -3.0209306703320284, -0.12268618444651436, -5.0620831677248805]]], [[[-6.8168635169011509,
-2.175899064858076, -0.99953387693515117, -4.2007864655319018], [-2.4495161214213925, 1.190807763248356,
-7.0938824479062976, 0.11576775306493436], [-1.7263175663718786, -1.0276818989963012, -7.7695074633264163,
-6.0713977192242226]], [[-6.9856792641026235, -4.250890931377544, 1.2108522240746082, -0.55379315806302154],
[0.8512666192682885, -0.052722900434595044, -2.7571628509467954, -6.6489554680613105], [-3.4968891674378253,
-5.0591653144695652, -4.7171569740176675, -7.1567367669705675]]]])+(1.-msk_ref)*numpy.array([[[[-0.29235367894345909,
-1.65887842709762, -7.1271430754699985, -0.80901716647045152], [-3.1967064764257245, -1.9804318898824711,
-2.3503092061680375, -0.59759967416893023], [-9.0775154519183694, -3.1353340622295383, -5.8057633100629005,
0.32434196548226435]], [[-0.78056803125623286, -3.1980712050127584, -6.0426784614791718, -3.2933163291714482],
[-6.5009414440814197, 0.63059685944625254, -1.1698451618523444, -5.1205676095393935], [-4.5063609059056313,
-7.2614231625260137, -6.5898966929622578, -8.0140346400212721]]], [[[-6.8762400308267031, -7.2040414795252703,
-6.3718722632919844, -5.9796770442204767], [-3.8789865248972211, 0.5533299529501674, -4.9586001376625983,
-7.9361908585492262], [-5.3831412330471062, -2.3155736450068654, 0.071692069978932516, -2.9743368407167639]],
[[-3.3574498501705072, -0.16812869079193948, -0.244270007868578, -1.9556333295614499], [-8.3763151913218223,
0.059325197958270515, 0.28648727891187509, -0.83430422690380279], [-7.4736116363151295, -4.3124914941812298,
-1.4142470082957157, -6.3536439915740814]]], [[[-8.1084243407503518, -3.4674598887072774, -2.2910947007843525,
-5.4923472893811027], [-3.7410769452705939, -0.10075306060084532, -8.3854432717554985, -1.175793070784267],
[-3.01787839022108, -2.3192427228455026, -9.0610682871756172, -7.3629585430734235]], [[-8.2772400879518244,
-5.5424517552267449, -0.080708599774593104, -1.8453539819122229], [-0.44029420458091284, -1.3442837242837964,
-4.0487236747959967, -7.9405162919105123], [-4.7884499912870266, -6.350726138318767, -6.0087177978668693,
-8.4482975908197702]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank1_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([2.1945719955206853,
-3.4851810549539852])+(1.-msk_arg0)*numpy.array([-3.159460740559509, 1.0507096466806898])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(2.92811762582)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([5.1226896213358133,
-0.5570634291388572])+(1.-msk_ref)*numpy.array([-0.23134311474438096, 3.9788272724958178])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank1_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([1.9387192390641195,
-2.294788495198282])+(1.-msk_arg0)*numpy.array([-3.9950296964046816, -4.9584579002903517])
arg1=Symbol(shape=(2,))
res=arg0+arg1
s1=numpy.array([0.68148355985483988, 0.33396702170122339])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([2.6202027989189594, -1.9608214734970586])+(1.-msk_ref)*numpy.array([-3.3135461365498418,
-4.6244908785891283])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank2_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[1.9335525790389809, 4.8876884032830024, -3.6794048434152948, -2.9337672885330814,
0.5880232587543972], [1.2731441866942719, 4.8021715240969982, 2.9871285060348427, 4.3674026791776921,
2.3324101078324144], [3.257367767879968, 3.614481137699638, -4.0465097244122443, -3.3712543524462166,
0.83424572698980626], [-4.7734011845397317, -1.1918316514932537, -2.641576771310632, -3.7441723823507447,
2.5792398168240602]])+(1.-msk_arg0)*numpy.array([[0.51038147587387783, -3.548018657118809, 3.7494118465432393,
3.6729170048063136, -2.9522974158811746], [3.2109365766033289, -1.7347320393345091, -0.9996429948297223,
-0.75500884718678307, 1.5928790967815267], [-4.1174844249701259, 4.2030131668606234, -4.8484509001230229,
2.7032344298767921, 4.3009935101668333], [-1.4527019870327429, 3.9347061378002781, 1.21415230923688,
-3.666838308237784, -3.8400590973123858]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.22997214356)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[5.1635247225953336, 8.117660546839355, -0.44943269985894219, 0.29620485502327121,
3.8179954023107499], [4.5031163302506245, 8.0321436676533509, 6.2171006495911953, 7.5973748227340447,
5.5623822513887671], [6.4873399114363206, 6.8444532812559906, -0.81653758085589168, -0.14128220888986398,
4.0642178705461589], [-1.5434290409833791, 2.038140492063099, 0.58839537224572069, -0.51420023879439203,
5.8092119603804129]])+(1.-msk_ref)*numpy.array([[3.7403536194302305, -0.31804651356245639, 6.979383990099592,
6.9028891483626662, 0.27767472767517809], [6.4409087201596815, 1.4952401042218435, 2.2303291487266304,
2.4749632963695696, 4.8228512403378794], [-0.88751228141377325, 7.4329853104169761, -1.6184787565666703,
5.9332065734331447, 7.5309656537231859], [1.7772701565236098, 7.1646782813566308, 4.4441244527932326,
-0.43686616468143136, -0.61008695375603317]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank2_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[-0.074742989914646785, -1.8482493880577588, 1.0926262448311599, 4.5158483202643716,
-3.0805669333005561], [0.0085606966159099684, -2.9696862086974996, 3.3024460854167597, 1.5088165460119427,
-3.6452065491857266], [0.18694035412066512, -4.6738922180085147, 3.9551045875071438, 4.0084174115638724,
-0.63332177275981749], [2.5093858800842108, -0.36171911019222946, 0.19138395375626427, -3.1795621861527734,
-2.6267949144535008]])+(1.-msk_arg0)*numpy.array([[-3.5942187686631524, -3.7060821431133406, 0.9533196788857623,
-4.8840044000628744, 0.3938790125214453], [4.0652979493208985, 4.5325841421496644, -0.4281905049316661,
-1.742508580451184, 2.7120740894023898], [0.56888661640784566, -2.4569299021956068, 3.568568120069024,
-2.0793352745659766, -1.7689628659930126], [-4.8632954420706014, -2.8828667280653364, 3.4090243893802246,
3.0651732601260697, 4.6463764755640256]])
arg1=Symbol(shape=(4, 5))
res=arg0+arg1
s1=numpy.array([[-1.4953863183942318, -3.5127993001524969, 2.9138150805794103, -1.6144165168200519,
-0.65062618022498242], [-4.9181569250500168, -2.6971927119277908, 4.2365880197149934, -4.2036145824282496,
2.2260090531531453], [4.0868409931398002, -3.3893548967194032, 2.9012650531553019, -2.2355683566643378,
2.9627609193479501], [4.9921359000605019, 0.6569024014440803, 3.3639734573108839, 0.89356331435440595,
-4.0709626638242327]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[-1.5701293083088785, -5.3610486882102553, 4.0064413254105702, 2.9014318034443196,
-3.7311931135255385], [-4.9095962284341068, -5.6668789206252903, 7.5390341051317531, -2.6947980364163069,
-1.4191974960325813], [4.2737813472604653, -8.0632471147279183, 6.8563696406624457, 1.7728490548995346,
2.3294391465881326], [7.5015217801447127, 0.29518329125185083, 3.5553574110671482, -2.2859988717983675,
-6.6977575782777334]])+(1.-msk_ref)*numpy.array([[-5.0896050870573841, -7.2188814432658379, 3.8671347594651726,
-6.4984209168829263, -0.25674716770353712], [-0.85285897572911828, 1.8353914302218737, 3.8083975147833273,
-5.9461231628794335, 4.9380831425555352], [4.6557276095476459, -5.8462847989150095, 6.4698331732243259,
-4.3149036312303144, 1.1937980533549375], [0.12884045798990051, -2.2259643266212561, 6.7729978466911085,
3.9587365744804757, 0.57541381173979289]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank3_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-2.1957568090391955, 0.56747277575122101], [-1.4226171578539604,
-3.1174336379255854]], [[1.9150168705353749, 0.46771483389240665], [-0.73261624542450932, 1.4533109165427449]],
[[-4.3700026677098416, -4.4121889510507675], [-4.2432470132589684, -4.6365817911825937]], [[4.3712760608754326,
0.48815678812850649], [-4.2919585871561221, 2.8753619236403747]], [[4.7410827225779482, -3.2941488290580354],
[3.5834613437014919, 0.53477849558006074]], [[-2.2697241902980902, 1.4839036193452078], [4.3514574228344109,
2.0334834769049763]]])+(1.-msk_arg0)*numpy.array([[[1.9065956016010119, 3.8011536401496766], [4.2481111431072272,
0.7657337986451509]], [[1.7488690210709832, 4.5064595133713876], [-1.261534521038973, -1.5095749568667172]],
[[1.2010203264269057, 0.055494332510111377], [4.3269730839285749, -0.54412407243328076]], [[-2.6257140205956175,
-3.4462245120816002], [1.3451771798822101, 2.462398203439907]], [[-2.5713124204289493, 1.9356323962441504],
[1.8879658089499234, 3.1212800001648091]], [[1.942043508304808, 0.80539011514164471], [-0.3765200612428643,
0.73339801844715691]]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(2.24723235412)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[0.05147554507665264, 2.8147051298670691], [0.82461519626188773,
-0.87020128380973727]], [[4.162249224651223, 2.7149471880082547], [1.5146161086913388, 3.700543270658593]],
[[-2.1227703135939935, -2.1649565969349194], [-1.9960146591431203, -2.3893494370667456]], [[6.6185084149912807,
2.7353891422443546], [-2.044726233040274, 5.1225942777562228]], [[6.9883150766937963, -1.0469164749421873],
[5.83069369781734, 2.7820108496959088]], [[-0.022491836182242153, 3.7311359734610559], [6.598689776950259,
4.2807158310208244]]])+(1.-msk_ref)*numpy.array([[[4.15382795571686, 6.0483859942655247], [6.4953434972230752,
3.012966152760999]], [[3.9961013751868313, 6.7536918674872357], [0.98569783307687509, 0.73765739724913093]],
[[3.4482526805427538, 2.3027266866259595], [6.574205438044423, 1.7031082816825673]], [[-0.37848166647976944,
-1.1989921579657521], [3.5924095339980582, 4.7096305575557551]], [[-0.32408006631310116, 4.1828647503599985],
[4.1351981630657715, 5.3685123542806572]], [[4.1892758624206561, 3.0526224692574928], [1.8707122928729838,
2.980630372563005]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank3_Symbol_rank3(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[-3.6330041831896742, 1.9011276595647058], [4.0527837903730326, 3.7453216540822218]],
[[1.1423057067323032, -4.6191355501663702], [-0.19479401086936399, 3.6518312558771875]], [[-0.78164127432320996,
-0.0025588788834731702], [-2.5155059876978534, -2.7853664238124578]], [[-2.4557560474662496, -1.7001261418483038],
[2.2437567320884249, -4.5528490181464578]], [[3.3965240991344601, 2.7531638892344281], [-1.0182649859279858,
0.37879180372082377]], [[-2.2634040587587356, -3.6908761533687482], [-2.6652399154901509,
-2.0159814304593739]]])+(1.-msk_arg0)*numpy.array([[[4.9981907924797788, 4.277720751221235], [-4.4785446333946686,
-3.8140270519701982]], [[1.4517149340948965, 1.9122847710945834], [-1.0984824997077558, 4.9260526287710995]],
[[3.0231870187238314, -4.426803554802202], [-0.1009215503507912, -2.4226611633877337]], [[3.1439947236211125,
-2.7156096061802728], [-0.27949941006709977, 0.15562912547547469]], [[-1.6704879956646712, -0.87822202800174587],
[-4.0968204088950708, -4.8812474874399072]], [[-3.0876637956180186, 0.42808604578959475], [-0.76617423765119153,
1.4811418969805343]]])
arg1=Symbol(shape=(6, 2, 2))
res=arg0+arg1
s1=numpy.array([[[-3.655791939954395, 1.9082625611635287], [2.0305234873740705, -3.9575879711347337]],
[[0.58883813376680294, -0.44253502109642717], [-0.50659655202841058, 4.7262250303753071]], [[2.3551049262619417,
-2.7472704728416062], [-4.2131185370897501, 1.1560716927603512]], [[-1.8521430501234626, -2.8126771236453196],
[-1.6116964851382032, 4.3144406033510982]], [[-4.4005771771028979, -3.8795508309654512], [0.95903540985898683,
-0.84559016177598512]], [[-2.6007509769442674, -0.13151235868250399], [-1.5038936232862978, -3.9733280592961249]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[-7.2887961231440688, 3.8093902207282344], [6.0833072777471031, -0.21226631705251187]],
[[1.7311438404991062, -5.0616705712627974], [-0.70139056289777457, 8.3780562862524945]], [[1.5734636519387317,
-2.7498293517250794], [-6.7286245247876035, -1.6292947310521066]], [[-4.3078990975897122, -4.5128032654936234],
[0.63206024695022167, -0.23840841479535957]], [[-1.0040530779684378, -1.1263869417310231], [-0.059229576068998924,
-0.46679835805516134]], [[-4.8641550357030034, -3.8223885120512522], [-4.1691335387764488,
-5.9893094897554988]]])+(1.-msk_ref)*numpy.array([[[1.3423988525253838, 6.1859833123847636], [-2.4480211460205981,
-7.7716150231049319]], [[2.0405530678616994, 1.4697497499981562], [-1.6050790517361664, 9.6522776591464066]],
[[5.3782919449857731, -7.1740740276438082], [-4.3140400874405413, -1.2665894706273826]], [[1.29185167349765,
-5.5282867298255924], [-1.891195895205303, 4.4700697288265729]], [[-6.0710651727675691, -4.757772858967197],
[-3.137784999036084, -5.7268376492158923]], [[-5.688414772562286, 0.29657368710709076], [-2.2700678609374894,
-2.4921861623155905]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank4_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[4.965007128412612, 3.4584141019026564, -1.0391619896304451, 4.5542963326499351],
[-0.0016792172679549466, -2.9053441565334981, 0.025786108583792711, -0.89554847161554374], [4.4904084527351209,
-0.89553646258473307, 3.8929449623498495, -2.8715607346304415]], [[-3.727374719009604, 2.2555823384608908,
0.53380019017552272, -0.29480940480144113], [-3.6344667828862445, -4.8499559892732567, 3.5342171405331317,
1.9875915936023327], [3.0643486049591804, -2.9482947381564806, 1.257296440825332, -4.4599817600046716]]],
[[[-3.7989993001254971, 4.2006768317373879, -1.9340842456373886, 0.25295780568139836], [0.15305381262779072,
2.184447614622945, -2.0595806484522039, 1.6196719151709491], [-1.550459702477788, 2.2328097059995393,
-3.2648987061947632, -1.7698524550474004]], [[-3.1067614393264673, 3.6490340896776274, 4.2948603770463407,
-3.4382940099694084], [-1.765073080880275, 2.5928931740693892, 2.2530590640640069, 2.7653349815108443],
[-0.88766895991026384, 3.8444038125137965, 3.8283329993863564, 1.6961545196727537]]], [[[-1.6941819291782823,
-4.3507603532160344, 0.58625398426930175, -4.9534370199923137], [4.3258398610183271, 4.7398172498630355,
-0.27425006429631082, -0.80958052389792012], [0.27800145594245151, -0.70646630926925713, -1.3619199397032533,
-0.22712536683851958]], [[-3.7307177958823781, -0.17135910311966995, -1.2454260400370809, 1.8499155339141273],
[0.7652733563966283, -4.2318891899847593, 4.1390775019993704, 2.1086112655335079], [-4.4480501135282662,
4.3290513315610166, -4.1098101623830443, -2.8839598970399614]]]])+(1.-msk_arg0)*numpy.array([[[[3.9323713317642746,
4.4527426387356446, 1.8489227456459432, 2.295838413561385], [-1.5932231826477694, -0.043483214358698064,
2.6866561252017789, -1.3064680912144833], [-4.563955043071191, -4.5294274892608124, 1.1139333008427865,
-3.356095173880258]], [[-0.39784058429088365, 1.3572530126249651, 0.73921609667405086, -2.8036097598039502],
[-1.6466307808609693, -3.6730522383966999, -4.2815488732075613, -3.0943250956889665], [0.84471742986867238,
3.3304241697775492, -2.7207357502431542, -1.8257126717947059]]], [[[0.21030801293033274, 4.6379651350087698,
4.213456762528347, 4.0550184068364885], [-2.5755175539757227, 2.6713165204428986, 3.2808072440183729,
2.8475364996882107], [4.8503832880401561, -0.89396576884489498, 4.8726952699950328, 1.8570156992262419]],
[[-4.6778874236692944, 2.1109769293880465, 0.79097589510131172, -2.1112073984121893], [2.558958067688426,
2.8307096810380727, 0.012443144332241474, -3.7601222060065065], [-1.3755439053562823, 2.9800220614031678,
1.6579582033193425, 4.4427116407434362]]], [[[-0.86660146317817688, 1.3032310329697525, 3.0027070238303377,
-2.9114837729491319], [-3.4567748888099636, 3.3638086688271702, 4.1486162466002519, 2.0749122046757407],
[0.84439318528796647, -3.6592289308593697, 0.77430002321168345, 1.7927967246699836]], [[-1.1981415218608116,
2.3445312580391588, -1.5436298697897444, 1.6111465180751141], [1.6230738725320037, -1.3035089800291666,
-4.6787506207538687, 2.9155460797717678], [3.3315156088599238, -3.5200805068877128, -1.1181004173108544,
-2.2485916181204857]]]])
arg1=Symbol(shape=())
res=arg0+arg1
s1=numpy.array(3.43950171094)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[8.4045088393544027, 6.8979158128444471, 2.4003397213113455, 7.9937980435917257],
[3.4378224936738357, 0.5341575544082926, 3.4652878195255834, 2.543953239326247], [7.9299101636769116,
2.5439652483570576, 7.3324466732916402, 0.56794097631134921]], [[-0.28787300806781335, 5.6950840494026815,
3.9733019011173134, 3.1446923061403496], [-0.19496507194445378, -1.410454278331466, 6.9737188514749224,
5.4270933045441234], [6.5038503159009711, 0.49120697278531011, 4.6967981517671227, -1.0204800490628809]]],
[[[-0.35949758918370645, 7.6401785426791786, 1.5054174653044021, 3.6924595166231891], [3.5925555235695814,
5.6239493255647357, 1.3799210624895868, 5.0591736261127398], [1.8890420084640027, 5.67231141694133, 0.1746030047470275,
1.6696492558943903]], [[0.33274027161532338, 7.0885358006194181, 7.7343620879881314, 0.0012077009723823195],
[1.6744286300615157, 6.0323948850111799, 5.6925607750057976, 6.204836692452635], [2.5518327510315268,
7.2839055234555872, 7.2678347103281471, 5.1356562306145443]]], [[[1.7453197817635084, -0.91125864227424369,
4.0257556952110924, -1.513935309050523], [7.7653415719601178, 8.1793189608048262, 3.1652516466454799,
2.6299211870438706], [3.7175031668842422, 2.7330354016725336, 2.0775817712385374, 3.2123763441032711]],
[[-0.29121608494058737, 3.2681426078221207, 2.1940756709047098, 5.289417244855918], [4.204775067338419,
-0.79238747904296858, 7.5785792129411611, 5.5481129764752986], [-1.0085484025864755, 7.7685530425028073,
-0.67030845144125362, 0.55554181390182933]]]])+(1.-msk_ref)*numpy.array([[[[7.3718730427060652, 7.8922443496774353,
5.2884244565877339, 5.7353401245031757], [1.8462785282940213, 3.3960184965830926, 6.1261578361435696,
2.1330336197273074], [-1.1244533321294004, -1.0899257783190217, 4.5534350117845772, 0.083406537061532671]],
[[3.041661126650907, 4.7967547235667558, 4.1787178076158416, 0.63589195113784047], [1.7928709300808214,
-0.23355052745490923, -0.84204716226577059, 0.34517661525282417], [4.2842191408104631, 6.7699258807193399,
0.71876596069863652, 1.6137890391470848]]], [[[3.6498097238721234, 8.0774668459505605, 7.6529584734701377,
7.4945201177782792], [0.86398415696606801, 6.1108182313846893, 6.7203089549601636, 6.2870382106300013],
[8.2898849989819468, 2.5455359420968957, 8.3121969809368235, 5.2965174101680326]], [[-1.2383857127275038,
5.5504786403298372, 4.2304776060431024, 1.3282943125296014], [5.9984597786302167, 6.2702113919798634,
3.4519448552740322, -0.32062049506471579], [2.0639578055855083, 6.4195237723449585, 5.0974599142611332,
7.8822133516852269]]], [[[2.5729002477636138, 4.7427327439115432, 6.4422087347721284, 0.52801793799265884],
[-0.017273177868172951, 6.8033103797689609, 7.5881179575420425, 5.5144139156175314], [4.2838948962297572,
-0.21972721991757904, 4.2138017341534741, 5.2322984356117743]], [[2.2413601890809791, 5.7840329689809495,
1.8958718411520463, 5.0506482290169048], [5.0625755834737944, 2.1359927309126241, -1.239248909812078,
6.3550477907135585], [6.7710173198017145, -0.080578795945922099, 2.3214012936309363, 1.190910092821305]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_add_overloaded_expandedData_rank4_Symbol_rank4(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*numpy.array([[[[3.2510674404409041, 2.1171696862303406, 2.9610258759664267, -3.8373977579450456],
[0.75383244276133166, 2.4077943881602728, 3.873284406870285, 3.7937584009819574], [-4.6069898901399364,
-2.5452970249895754, 3.650830786457707, -0.56630176651201847]], [[3.6738989513815135, -1.1553536380556686,
4.303352195803182, 2.0201689947921695], [2.5110280594242029, 1.1178178456135743, 3.5722095880572251,
-3.0495901167648221], [-1.8161969765914288, -3.850369287459924, 1.8305771607495833, 3.8129356009276751]]],
[[[4.8159492177547296, -2.7259760165966638, -0.056119891503465524, 3.2320437499651025], [4.1412540490540568,
2.3145635424798332, 4.2298625240821792, -4.9326174629443722], [1.2505234798682396, 4.1728981653768358,
-1.4526511101284445, -0.73865645812869563]], [[-2.5027203270038956, -0.75821705726011146, -2.0074201432570495,
-0.20166798891695503], [1.7962444938241209, 4.9186635916785164, -3.3612255674731486, -3.1402103698143327],
[4.8100127068213077, -3.7003932729639377, -2.3809463861562454, 2.6337296431542621]]], [[[0.8461884816413443,
2.2850095300693116, 3.1039351776827235, 2.7358221987272575], [-1.331100327658973, -2.4718869003284438,
3.8392116060077814, 3.7886003252177218], [-2.740692362699221, -1.1104811343803189, 1.065443269317063,
-1.604926521206449]], [[3.1359320207935291, 2.4159415877072101, -2.9781841648177654, 0.4457695581762291],
[1.4022534028069558, 3.2181877465159641, 4.1561033889739196, -4.5314636502141923], [2.4896032954770373,
-1.6749755107952033, -4.2977752660345292, 4.3862296692093636]]]])+(1.-msk_arg0)*numpy.array([[[[3.8098232095134126,
-2.0180524002497693, 4.420784171182504, -2.4324750966542674], [2.4681882567616125, 3.0279649104786941,
2.2383665512055266, -0.091420157761364251], [4.7846856391630048, 0.45001495814867454, 2.8428137570111911,
3.6542996408716562]], [[-3.3832925941075711, -4.6684050424331947, 2.7145812310865534, 0.57489640415196952],
[3.2363298539062395, -0.28076205609599914, -2.1610563710523598, -3.9600308036480381], [4.1445091213012599,
0.23464603550937735, -4.9214532841127738, 3.7601288072640866]]], [[[4.5878923885513938, -2.7602444517968006,
-2.4823493575559641, -1.1998619544811917], [-1.0165322624110429, 4.8743114304602564, 3.0069704689379755,
2.0086372739622043], [-1.7482883016273565, 4.5233781656491008, 1.0481669308330579, 3.3780108680134457]],
[[-4.5351514069636076, -4.760484108729206, -1.7334568308716203, -4.3080131499917833], [4.0321976091043883,
-2.6576000312675063, 1.3372423488299923, -3.8949616711167625], [3.5793384711817051, 2.60693067621275,
1.8056256765125287, -3.9915454170699869]]], [[[0.39851532295995273, 2.2465287291059273, 0.64170560779626662,
-4.7331314705888738], [3.5329039709028898, -2.5311269573107662, 2.8367974744858193, -4.3457969220676684],
[-1.526677955424999, -2.5983211468943357, -1.3293797580217093, -3.1887378668078279]], [[3.1416335105809505,
0.35146012646543134, 2.428390004415637, 2.7813900205500861], [3.5228217461650111, -0.012304332300811183,
-3.1395042313107369, 4.8647351561551702], [2.2570133784920099, -1.7535240218446777, 0.38792070998653028,
-0.21839923153693785]]]])
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0+arg1
s1=numpy.array([[[[-0.55399336747432937, -3.6468486902030306, 2.4533567494215669, 4.8267547347789659],
[1.1480960590338416, 3.5599245920968787, -2.8247534868419724, -2.2031349101131505], [1.7520095897646017,
4.4293583295521266, -3.2046920932014888, -3.8760923163847472]], [[3.9288042477427645, 1.103593535294765,
0.62546922225950485, 2.5431633219905123], [2.5483588394973191, -0.82358610517599207, -0.47010674146441023,
2.7635563586840011], [3.5616440522317419, 2.2995934729430481, -3.501591556463012, 1.3778428754586027]]],
[[[-4.3918539920661051, 0.24976043236636869, -2.4847081470778463, 4.8636790550226792], [-4.2172400078729559,
-2.0316184192507647, -0.53464794178739794, -0.035422588600630966], [1.7049703562375615, 4.2019750499164399,
-3.7430217705554858, -3.4952387702082346]], [[-0.39925876875124189, 1.4505137462439404, -4.1941814051173072,
-1.844757872605356], [-3.4448187389632414, -3.5340944666273377, -3.178247383159305, -1.7824872241435519],
[-3.6843631882800798, -4.1186208792142187, 2.0636953370355959, -0.18717114434561122]]], [[[-2.4316812831173742,
0.39582208925882689, 1.4893695917228467, -3.1232026180567773], [2.1122901499636226, 4.9884613457151978,
-4.7793541216702149, -3.9541373136233391], [-4.8256481088328194, -0.10764491664526066, 2.9970513787255895,
-1.0443943611478437]], [[3.6491162738908258, 3.4225261399204765, -2.9600723325757849, 3.3422667802452324],
[-3.763493116056098, 4.6894908619506595, 2.532040050484988, 0.99028387045053101], [2.5962274887920085,
-0.2721955960411897, -4.7946284910477441, -0.96141278632713245]]]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[[[2.6970740729665748, -1.52967900397269, 5.4143826253879936, 0.98935697683392032],
[1.9019285017951733, 5.9677189802571515, 1.0485309200283126, 1.5906234908688068], [-2.8549803003753347,
1.8840613045625512, 0.44613869325621813, -4.4423940828967652]], [[7.6027031991242779, -0.051760102760903592,
4.9288214180626868, 4.5633323167826818], [5.059386898921522, 0.29423174043758227, 3.1021028465928149,
-0.28603375808082099], [1.7454470756403131, -1.550775814516876, -1.6710143957134287, 5.1907784763862779]]],
[[[0.42409522568862457, -2.4762155842302951, -2.5408280385813118, 8.0957228049877816], [-0.075985958818899135,
0.28294512322906851, 3.6952145822947813, -4.9680400515450032], [2.9554938361058012, 8.3748732152932757,
-5.1956728806839303, -4.2338952283369302]], [[-2.9019790957551375, 0.6922966889838289, -6.2016015483743567,
-2.046425861522311], [-1.6485742451391205, 1.3845691250511787, -6.5394729506324536, -4.922697593957885],
[1.1256495185412279, -7.8190141521781564, -0.3172510491206495, 2.4465584988086508]]], [[[-1.5854928014760299,
2.6808316193281385, 4.5933047694055702, -0.38738041932951983], [0.78118982230464962, 2.516574445386754,
-0.94014251566243345, -0.16553698840561726], [-7.5663404715320404, -1.2181260510255796, 4.0624946480426525,
-2.6493208823542926]], [[6.7850482946843549, 5.8384677276276866, -5.9382564973935503, 3.7880363384214615],
[-2.3612397132491423, 7.9076786084666235, 6.6881434394589077, -3.5411797797636613], [5.0858307842690458,
-1.9471711068363931, -9.0924037570822733, 3.4248168828822312]]]])+(1.-msk_ref)*numpy.array([[[[3.2558298420390832,
-5.6649010904527994, 6.8741409206040709, 2.3942796381246985], [3.6162843157954541, 6.5878895025755728,
-0.58638693563644573, -2.2945550678745148], [6.5366952289276066, 4.8793732877008011, -0.36187833619029774,
-0.22179267551309101]], [[0.54551165363519338, -3.5648115071384296, 3.3400504533460582, 3.1180597261424818],
[5.7846886934035586, -1.1043481612719912, -2.63116311251677, -1.196474444964037], [7.7061531735330018,
2.5342395084524254, -8.4230448405757858, 5.1379716827226893]]], [[[0.19603839648528876, -2.5104840194304319,
-4.9670575046338108, 3.6638171005414875], [-5.2337722702839988, 2.8426930112094917, 2.4723225271505775,
1.9732146853615733], [-0.043317945389794943, 8.7253532155655407, -2.6948548397224279, -0.11722790219478885]],
[[-4.9344101757148495, -3.3099703624852657, -5.9276382359889279, -6.1527710225971397], [0.58737887014114687,
-6.1916944978948436, -1.8410050343293127, -5.6774488952603139], [-0.10502471709837469, -1.5116902030014687,
3.8693210135481246, -4.1787165614155981]]], [[[-2.0331659601574215, 2.6423508183647542, 2.1310751995191133,
-7.8563340886456512], [5.6451941208665124, 2.4573343884044316, -1.9425566471843956, -8.2999342356910084],
[-6.3523260642578183, -2.7059660635395963, 1.6676716207038802, -4.2331322279556716]], [[6.7907497844717764,
3.7739862663859078, -0.53168232816014793, 6.1236568007953185], [-0.24067136989108695, 4.6771865296498483,
-0.60746418082574882, 5.8550190266057012], [4.8532408672840184, -2.0257196178858674, -4.4067077810612139,
-1.1798120178640703]]]])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
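   # From here the same rank/type matrix is repeated for the overloaded
   # subtraction operator: res = arg0 - arg1 is substituted and checked
   # against a reference equal to arg0 - s1.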
def test_sub_overloaded_constData_rank0_Symbol_rank0(self):
arg0=Data(1.30830371112,self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0412291309402)
sub=res.substitute({arg1:s1})
ref=Data(1.26707458018,self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank1(self):
arg0=Data(-4.2604726935,self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-3.8546037299533653, -1.305392606117024])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-0.4058689635493371, -2.9550800873856784]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank2(self):
arg0=Data(0.902009664206,self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-3.117681444740418, -3.2512793024980069, -3.7762244881344218, -0.50644943812549315,
3.066726444630655], [-2.6348956508380805, -0.90372740616696667, 0.5252271533586752, 2.0132741900533446,
2.0837322808099037], [0.088376617597372586, 0.67864487020517306, 3.7057383001711681, 1.0445042366908988,
-2.1093161712985955], [4.328915747720707, -0.73501622742024342, -0.088412628376807412, -3.0414953794209754,
1.610361274316344]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[4.0196911089468177, 4.1532889667044071, 4.6782341523408215, 1.4084591023318929,
-2.1647167804242553], [3.5369053150444802, 1.8057370703733664, 0.37678251084772452, -1.1112645258469449,
-1.181722616603504], [0.81363304660902713, 0.22336479400122666, -2.8037286359647684, -0.14249457248449904,
3.0113258355049952], [-3.4269060835143073, 1.6370258916266431, 0.99042229258320713, 3.9435050436273751,
-0.7083516101099443]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank3(self):
arg0=Data(4.30012329043,self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[-2.4328051948060772, 1.3096803933228829], [-1.9201038070201615, 2.2529209930562519]],
[[4.4911763191005498, -0.0070408039855616167], [-4.5070979412665588, 0.23394826644475319]], [[-2.0679275681214171,
4.7260141882743518], [-1.9530690972223672, 4.2165911161948344]], [[4.2340594486013217, 0.31531838157863668],
[1.2102543060708451, 4.5768051588147358]], [[4.9016533619135778, 1.0237157761801843], [-1.6198381225390657,
1.509534129406096]], [[-2.8351524725878399, -0.8712771035569391], [-1.2500793307427105, 0.52784760832550681]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[6.732928485237343, 2.990442897108383], [6.2202270974514278, 2.0472022973750139]],
[[-0.19105302866928398, 4.3071640944168275], [8.8072212316978238, 4.0661750239865126]], [[6.3680508585526834,
-0.42589089784308598], [6.2531923876536331, 0.083532174236431445]], [[0.066063841829944181, 3.9848049088526292],
[3.0898689843604208, -0.27668186838346998]], [[-0.60153007148231197, 3.2764075142510816], [5.9199614129703315,
2.7905891610251699]], [[7.1352757630191057, 5.1714003939882049], [5.5502026211739768,
3.772275682105759]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank0_Symbol_rank4(self):
arg0=Data(-3.5839426267,self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-2.9729696451374421, 2.7845056200381855, 0.070436437102223692, 0.66836223796868044],
[0.40381761203578836, -1.7869220467261826, -4.3681167712065552, 1.0762008553734699], [-3.4293067325266744,
-3.8959384230092855, -4.2869773308861872, -3.5982581222849266]], [[3.8085384127848325, -4.9902013750126919,
1.7025140755302903, -1.8585391591273237], [-1.8948326373524536, 2.0874520505745666, -1.8647114753321095,
3.9665649921657007], [-2.6617432109425376, -0.043781338271665859, -4.3924469058705498, -4.6038566089651081]]],
[[[4.1612414942039617, -0.24691459950937489, 1.8801077349311939, -4.0607604598486082], [-0.48975931816079132,
4.776651055544292, 2.5892649853139229, 2.6300466396994988], [-0.6331493645323949, -4.8747858313906498,
2.5714462579440713, -0.12625615907892662]], [[1.8766405716198298, 0.97931619405259518, -1.2333119307639082,
3.632140408148242], [0.96979041799351151, -4.0819837173164526, 3.4625138677193164, -1.7431511130821575],
[-2.7530992377422381, -3.1495479306859906, 1.3466227111831488, -2.3016323722421128]]], [[[-2.8378224290103491,
-0.7230057223129247, 0.95865498114414649, 0.14297561114879365], [2.3319242484901492, 4.9972541799736234,
-1.7121650896762564, 1.6097551517446558], [2.7133813837524077, -3.1913323682416994, -0.39896207531318861,
-3.2753783571190107]], [[1.3158800827274399, -0.034075573686918936, 3.2707189112070392, -2.9118211235462041],
[4.362994678434946, -3.2771781302292515, 3.4919565479064456, 1.6061522420425254], [-1.8973785117347788,
-4.4461539342202174, -3.8132486661529263, -0.74231592463494511]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-0.61097298156602342, -6.368448246741651, -3.6543790638056892, -4.252304864672146],
[-3.9877602387392539, -1.7970205799772829, 0.78417414450308964, -4.6601434820769354], [-0.15463589417679113,
0.31199579630581997, 0.70303470418272163, 0.014315495581461057]], [[-7.392481039488298, 1.4062587483092264,
-5.2864567022337559, -1.7254034675761418], [-1.689109989351012, -5.6713946772780321, -1.7192311513713561,
-7.5505076188691662], [-0.9221994157609279, -3.5401612884317997, 0.80850427916708423, 1.0199139822616425]]],
[[[-7.7451841209074272, -3.3370280271940906, -5.4640503616346594, 0.4768178331451427], [-3.0941833085426742,
-8.3605936822477567, -6.1732076120173884, -6.2139892664029643], [-2.9507932621710706, 1.2908432046871843,
-6.1553888846475369, -3.4576864676245389]], [[-5.4605831983232953, -4.5632588207560607, -2.3506306959395573,
-7.2160830348517075], [-4.553733044696977, 0.49804109061298707, -7.0464564944227819, -1.840791513621308],
[-0.83084338896122745, -0.43439469601747493, -4.9305653378866143, -1.2823102544613527]]], [[[-0.74612019769311644,
-2.8609369043905408, -4.542597607847612, -3.7269182378522592], [-5.9158668751936148, -8.5811968066770881,
-1.8717775370272092, -5.1936977784481213], [-6.2973240104558732, -0.39261025846176612, -3.1849805513902769,
-0.30856426958445482]], [[-4.8998227094309055, -3.5498670530165466, -6.8546615379105047, -0.67212150315726138],
[-7.9469373051384116, -0.306764496474214, -7.0758991746099111, -5.1900948687459909], [-1.6865641149686867,
0.8622113075167519, 0.22930603944946082, -2.8416267020685204]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([2.6649927252905226, 0.29496968217893382]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(1.03366663195)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([1.6313260933372291, -0.73869694977435962]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([3.9090880537794526, -3.9706193840215942]),self.functionspace)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-3.7233870114697742, 0.99043840493200186])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([7.6324750652492268, -4.9610577889535961]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[2.8033126273843685, 0.51509190965393792, 3.931306976936968, -3.3823534090429486,
-2.3486719525293087], [-2.9837425664154784, -2.4457160287299686, 3.8981965382683743, -0.89609359902144714,
4.1620406111464288], [3.6868893591462246, -2.9993029597001462, 1.8283120616948665, -2.0195573949932277,
-2.1640627499057361], [-2.9723279323425489, -4.8559061533246624, -1.0130455282709172, -3.7833351321644395,
3.514692525422209]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(4.86937457463)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[-2.0660619472497519, -4.3542826649801825, -0.93806759769715242, -8.2517279836770694,
-7.2180465271634286], [-7.8531171410495988, -7.315090603364089, -0.97117803636574607, -5.7654681736555675,
-0.70733396348769162], [-1.1824852154878958, -7.8686775343342665, -3.0410625129392539, -6.8889319696273486,
-7.0334373245398565], [-7.8417025069766693, -9.7252807279587827, -5.8824201029050371, -8.6527097067985608,
-1.3546820492119114]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[-1.1140360715186182, -1.5235600156934481, 4.3075103934286023, 4.6800377743432158,
-3.2505150436972521], [0.39123458636258768, 0.41088806870879768, -2.9614108446790501, 1.1049238977643405,
0.92166667279843395], [0.54565864417397059, -4.8476249672143004, 4.9444652981547943, 4.0252126389168215,
-3.9123423425216322], [-3.6777596228844844, -3.4408972758983558, 2.7718180074050611, -0.3997152204895924,
-0.16573647825956073]]),self.functionspace)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-2.4209487163246299, 1.3152643083131128, -0.71046464711788015, 0.21557543046364458,
-2.202065459251934], [-3.9101544501984198, -2.8682151089642827, 2.7125251197023488, 1.4173123031722534,
2.7246295240806209], [-1.5744991442525436, 3.0598215212654001, 0.63494427405471487, -4.906149376046594,
-1.6839564426436748], [4.0729555430880922, -0.83371622418680769, 0.46337987461630981, 4.0014755703742395,
-2.1103899940006032]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.3069126448060118, -2.8388243240065609, 5.0179750405464825, 4.4644623438795712,
-1.0484495844453181], [4.301389036561007, 3.2791031776730803, -5.6739359643813989, -0.31238840540791291,
-1.8029628512821869], [2.1201577884265141, -7.9074464884797004, 4.3095210241000794, 8.9313620149634154,
-2.2283858998779573], [-7.7507151659725766, -2.6071810517115481, 2.3084381327887513, -4.4011907908638319,
1.9446535157410425]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-2.6064326776506652, 4.9989076052590633], [-3.0068821433777249, -3.1193113732509516]],
[[-1.3190483681618739, 3.9479827067009108], [1.0954417889014865, 4.6359051697534426]], [[-2.9778493741722056,
3.4845430816156977], [1.7569072943914552, 1.1616150547614428]], [[-0.91210869485198565, -1.3406976214361355],
[3.2217649968914159, -2.662260898242006]], [[4.1697693146337542, -1.1741423631833072], [-4.9803850608859115,
1.2700647554700222]], [[4.6074170359664368, 1.453706456526124], [0.20949339688511692,
3.0091215511346796]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-1.04145599079)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.5649766868561219, 6.0403635960536066], [-1.9654261525831815, -2.0778553824564083]],
[[-0.27759237736733056, 4.9894386974954541], [2.1368977796960298, 5.6773611605479859]], [[-1.9363933833776623,
4.525999072410241], [2.7983632851859985, 2.2030710455559861]], [[0.12934729594255767, -0.29924163064159215],
[4.2632209876859593, -1.6208049074474626]], [[5.2112253054282975, -0.13268637238876391], [-3.9389290700913682,
2.3115207462645655]], [[5.6488730267609801, 2.4951624473206673], [1.2509493876796602,
4.0505775419292229]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[2.0075159970537113, 4.417162011434554], [0.71949384400506577, 1.0783048900035652]],
[[4.7614254606302335, -2.0888542276996978], [-3.5997702799671547, 4.2825487871951644]], [[-0.39389734575197544,
1.3283252585178928], [3.6919455158435834, -0.76277259642421402]], [[-4.4972180700076887, -3.7983795355307128],
[-0.26779668046970784, -0.79380221724008582]], [[-2.0572521505738273, -1.5154686544559368], [4.0972713376059851,
4.5986089620495108]], [[-1.3971821196462377, 0.16028646761807508], [-0.63755809097850857,
-3.3787710682197272]]]),self.functionspace)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[3.5103565349856751, 0.91526758558677379], [-3.7224124618951135, -0.27931399630195397]],
[[1.5813622936549105, 3.6172915696233972], [-1.2364412564258132, 0.16417768270487709]], [[0.64050559170122234,
4.6361361331624593], [-0.47839680540824325, -2.1615310941440589]], [[-0.85667930966756511, 1.669882578368358],
[0.22343162562157293, 0.80905790542025358]], [[-3.5873387244847543, 3.1163266795230058], [3.5553732672252671,
-4.6758779472194405]], [[3.6742958529176484, 0.58762359541383802], [1.5778519953325496, -0.39731537378910975]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-1.5028405379319638, 3.5018944258477802], [4.4419063059001793, 1.3576188863055192]],
[[3.180063166975323, -5.7061457973230949], [-2.3633290235413416, 4.1183711044902873]], [[-1.0344029374531978,
-3.3078108746445665], [4.1703423212518267, 1.3987584977198448]], [[-3.6405387603401236, -5.4682621138990708],
[-0.49122830609128076, -1.6028601226603394]], [[1.5300865739109271, -4.6317953339789426], [0.54189807038071791,
9.2744869092689513]], [[-5.0714779725638861, -0.42733712779576294], [-2.2154100863110582,
-2.9814556944306174]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[0.66483074145605592, 2.9129070748039982, -1.8655842911981346, -1.098354904466996],
[1.7426470733136448, -2.4896761957460898, 4.3864323453867851, -4.0781460331955177], [-0.62183708580819008,
-2.6186592235582786, -1.8750164189422014, -3.9631241880095969]], [[4.0419620323350909, 0.15536839603964836,
1.9771157591398101, -2.6101097405194453], [-4.7364297803535704, 1.8318126417179714, 3.2354822684907454,
2.2507758179659376], [-4.8699934080808029, -0.35744120243411981, 4.0908957400805122, -3.8440017446794084]]],
[[[4.5466344627836612, -2.8174576749848423, -0.32339288977492142, -3.3368918944053516], [3.3311423168153738,
-1.2448667289851647, -0.66737673743075376, -3.9953617725851598], [-4.8878412407428931, 3.1347720870691358,
-2.4390985397355847, -3.5615840737730475]], [[-3.7978882365989697, 4.345238312451805, 2.8310129832366435,
2.8564779239624674], [-0.85025481289091864, -4.3757742754757345, 3.5451710843902031, -2.5068001174158816],
[2.6943798866386315, 2.2746017608025317, -4.2655778273063607, 0.97165631163417387]]], [[[-2.9330039029788955,
4.3910413333213238, 2.5513441899802833, -3.8678703253194402], [-2.6748516851594308, -3.8887038302549062,
1.2485088138696518, -3.9629424578182251], [-0.38166273681210328, 3.82781593241344, -4.1817331752844087,
4.682478964767725]], [[-0.85849290617372809, -0.49338756563096275, -1.0480256440941615, -0.51008618582467946],
[-0.26820315453886501, 4.8354933917592806, 2.9555158912003154, -2.4766421456452479], [2.5098219987182944,
3.6215601735655589, -4.4497307132070123, -3.9295385075107028]]]]),self.functionspace)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-2.59361652138)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[3.2584472628375467, 5.506523596185489, 0.72803223018335617, 1.4952616169144948],
[4.3362635946951356, 0.10394032563540101, 6.9800488667682759, -1.4845295118140269], [1.9717794355733007,
-0.025042702176787834, 0.7186001024392894, -1.3695076666281061]], [[6.6355785537165817, 2.7489849174211392,
4.5707322805213009, -0.01649321913795454], [-2.1428132589720796, 4.4254291630994622, 5.8290987898722362,
4.8443923393474284], [-2.2763768866993122, 2.236175318947371, 6.6845122614620029, -1.2503852232979176]]],
[[[7.140250984165152, -0.22384115360335155, 2.2702236316065694, -0.74327537302386082], [5.9247588381968646,
1.3487497923963261, 1.926239783950737, -1.401745251203669], [-2.2942247193614023, 5.7283886084506266,
0.15451798164590613, -0.96796755239155674]], [[-1.2042717152174789, 6.9388548338332958, 5.4246295046181343,
5.4500944453439581], [1.7433617084905721, -1.7821577540942437, 6.1387876057716939, 0.08681640396560919],
[5.2879964080201223, 4.8682182821840225, -1.6719613059248699, 3.5652728330156647]]], [[[-0.33938738159740467,
6.9846578547028146, 5.1449607113617741, -1.2742538039379494], [-0.081235163777940045, -1.2950873088734154,
3.8421253352511426, -1.3693259364367343], [2.2119537845693875, 6.4214324537949308, -1.5881166539029179,
7.2760954861492158]], [[1.7351236152077627, 2.100228955750528, 1.5455908772873292, 2.0835303355568113],
[2.3254133668426258, 7.4291099131407714, 5.5491324125818062, 0.11697437573624292], [5.1034385200997852,
6.2151766949470497, -1.8561141918255215, -1.335921986129212]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_constData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[2.140332416756844, -4.5756565160935745, 1.0268217328307561, 1.594533973931731],
[4.1426026647673879, 0.1548614651600202, 3.351820863446946, 0.54777524679756073], [-4.6470169243406527,
-3.4101935702258368, 1.3604597013400213, -4.3236653508957374]], [[2.3543066928954612, 1.6355558219698443,
3.8590758340122093, 0.055467084597328409], [1.3949738751098479, -2.9042097100731445, 2.1331143130237962,
-0.45715627400394165], [3.9505052117900146, -4.8644226435153097, 0.13641466419900183, 0.92434447564323374]]],
[[[-4.2036478385109302, -2.2096856472681958, -3.309442061812593, -0.17761420723311439], [-4.5417481392819026,
3.354117107537796, 2.9925164896060084, 4.231145636082223], [-4.3165407391400308, -0.16204594013147311,
-1.5308101185053733, 3.7017204822457384]], [[2.4648028362561725, 0.43817614121240833, -4.4908194091317366,
-0.081928750874263656], [-3.4087689978816016, 4.259133980931324, -4.2850896710829334, 4.6395735766216326],
[-1.3584480043808989, -4.7738821023855085, -1.2617431337636842, -1.2598313032270116]]], [[[2.2708892792624855,
1.9132737394453327, -0.50215367058696003, 0.19108419265161469], [-2.0796597802531669, 1.1505151966811367,
1.2957662425378791, -1.5883201097665802], [-1.7035021892623838, 4.8639671345493021, 3.1243484697100534,
0.47610495992410051]], [[-4.0444287366693015, -1.3614006776767349, -0.18268931922481002, 4.8063591217845332],
[3.1407426206783704, 2.8940879164962441, -4.9664997014592807, 1.6951588068340158], [-3.895479459710558,
1.7220903215355694, -3.7165673657855267, 3.1903385713544257]]]]),self.functionspace)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-4.3482304868754991, -1.2480666735558845, 0.43538858115159051, -2.0858236027245205],
[-2.442305699452354, 2.0213192586154003, -2.5262404161243679, -4.458062700052194], [0.26228138879138641,
-2.6430658161459242, -4.7246503759525602, 4.2538788761081854]], [[-1.6124403577544308, -1.8284497197976037,
-3.0160374139385002, 2.7523938918136759], [1.4437250527651582, -2.7814473787336489, 3.5116683735594361,
-3.9808640616716562], [1.7054962689298705, 4.7974185413341068, 1.9447068850818283, -1.2797130952071156]]],
[[[3.7642823106611107, 0.11145650212965919, -0.096799862214571597, 2.0215787533002523], [0.26390717935294816,
0.12612295721321498, 4.0275730341758482, -1.2268861937462172], [-2.947926663434548, -1.4514539315574626,
2.4550945474164232, -2.7897655841602651]], [[-1.5947829088079746, 0.80620330852535815, -4.5614285986030234,
-1.9102368071164841], [2.0807019362652692, -4.099640999530064, -1.8395330667711352, -4.6367501410986929],
[-2.5162327168837786, 4.6954385782651951, -2.1576821461704854, -1.62194811763983]]], [[[0.06729391952569852,
-0.57919376543293488, -3.1838952254737416, 1.7056529660452817], [3.6116233555564143, 0.81964000588296315,
-0.16440769780998377, 0.079355513141521783], [2.9805073823987431, 1.3188532056435962, 3.4153481616516537,
-2.5138710663982189]], [[2.8884594089569315, 1.1351683507610142, -0.68804270946144719, -4.7325886514124882],
[1.1204800401276476, 0.55566378590737031, 0.94240513232859335, 2.9610440134171334], [-2.6222587774463815,
-4.4048348584786705, -0.29650368246657699, -1.0078523107846902]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[6.4885629036323431, -3.32758984253769, 0.59143315167916555, 3.6803575766562515],
[6.5849083642197419, -1.8664577934553801, 5.8780612795713143, 5.0058379468497547], [-4.9092983131320391,
-0.76712775407991263, 6.0851100772925815, -8.5775442270039228]], [[3.9667470506498921, 3.464005541767448,
6.8751132479507095, -2.6969268072163475], [-0.048751177655310229, -0.12276233133949566, -1.3785540605356399,
3.5237077876677145], [2.2450089428601441, -9.6618411848494166, -1.8082922208828265, 2.2040575708503494]]],
[[[-7.9679301491720409, -2.321142149397855, -3.2126421995980214, -2.1991929605333667], [-4.8056553186348507,
3.227994150324581, -1.0350565445698399, 5.4580318298284407], [-1.3686140757054828, 1.2894079914259895,
-3.9859046659217965, 6.4914860664060035]], [[4.0595857450641475, -0.36802716731294982, 0.070609189471286804,
1.8283080562422205], [-5.4894709341468708, 8.3587749804613871, -2.4455566043117982, 9.2763237177203255],
[1.1577847125028797, -9.4693206806507035, 0.89593901240680118, 0.3621168144128184]]], [[[2.203595359736787,
2.4924675048782676, 2.6817415548867816, -1.514568773393667], [-5.6912831358095808, 0.33087519079817351,
1.4601739403478629, -1.667675622908102], [-4.684009571661127, 3.5451139289057059, -0.29099969194160025,
2.9899760263223194]], [[-6.932888145626233, -2.4965690284377491, 0.50535339023663717, 9.5389477731970214],
[2.0202625805507228, 2.3384241305888738, -5.908904833787874, -1.2658852065831177], [-1.2732206822641765,
6.1269251800142399, -3.4200636833189497, 4.1981908821391158]]]]),self.functionspace)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
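# taggedData variant of the same pattern: arg0 carries a default value plus a
# per-tag value (tag 1) set via setTaggedValue, and the reference mirrors
# both, so the subtraction is verified on the default and the tagged component
# of the Data object.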
def test_sub_overloaded_taggedData_rank0_Symbol_rank0(self):
arg0=Data(-2.29417952191,self.functionspace)
arg0.setTaggedValue(1,-4.27612309963)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-2.86386679086)
sub=res.substitute({arg1:s1})
ref=Data(0.569687268944,self.functionspace)
ref.setTaggedValue(1,-1.41225630877)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank1(self):
arg0=Data(-4.72691427991,self.functionspace)
arg0.setTaggedValue(1,0.483106242273)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([-0.58516003749737244, 2.93231182282255])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-4.1417542424175267, -7.6592261027374491]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0682662797700972, -2.4492055805498252]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank2(self):
arg0=Data(4.84060376911,self.functionspace)
arg0.setTaggedValue(1,-3.32867505476)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[3.5332516865172998, 4.2256878903288939, -4.6404295927681405, 4.9721874322243114,
-1.5545932240349902], [0.40603544670242542, -2.879718425724147, -2.1385047584627337, 4.6127992237598132,
0.57646645021785048], [-2.6334801212800754, -2.3655947826469701, 0.48086858542515643, 1.0360291664664301,
-3.4378490059536082], [-0.23853194944872236, -2.0363663305583768, -2.3289186751171798, 3.5102407359843486,
4.1303419895739388]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[1.3073520825884426, 0.6149158787768485, 9.4810333618738838, -0.13158366311856895,
6.3951969931407326], [4.434568322403317, 7.7203221948298895, 6.9791085275684761, 0.2278045453459292,
4.2641373188878919], [7.4740838903858178, 7.2061985517527125, 4.359735183680586, 3.8045746026393124,
8.2784527750593497], [5.0791357185544648, 6.8769700996641188, 7.1695224442229222, 1.3303630331213938,
0.71026177953180358]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-6.8619267412736988, -7.5543629450852929, 1.3117545380117415,
-8.3008624869807104, -1.7740818307214088], [-3.7347105014588244, -0.44895662903225197, -1.1901702962936653,
-7.9414742785162122, -3.9051415049742495], [-0.69519493347632366, -0.96308027210942893, -3.8095436401815554,
-4.3647042212228291, 0.10917395119720918], [-3.0901431053076767, -1.2923087241980222, -0.99975637963921926,
-6.8389157907407476, -7.4590170443303379]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank3(self):
arg0=Data(-3.20552188916,self.functionspace)
arg0.setTaggedValue(1,-0.473083670166)
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[0.71230320805011704, -3.008236723891188], [0.81066003773158002, -3.6043239509733382]],
[[3.691034498943317, -3.3919882986743777], [0.84551364067512935, 3.3207859438709946]], [[0.41963337446652105,
-3.6038224020133991], [-2.3537235378574151, -3.7120927558232997]], [[-3.4588851001838727, -0.31880183563871789],
[-1.3379489058063267, -3.9118810181560226]], [[4.4984539881701195, -3.2158956295350851], [1.5013508852420685,
2.8717656529358955]], [[-0.13701019263353231, -3.1176264463626078], [-1.67955120335195, 4.317481449568719]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-3.917825097207726, -0.19728516526642093], [-4.016181926889189, 0.3988020618157293]],
[[-6.896556388100926, 0.18646640951676874], [-4.0510355298327383, -6.5263078330286035]], [[-3.62515526362413,
0.39830051285579016], [-0.85179835130019388, 0.50657086666569073]], [[0.2533632110262638, -2.886720053518891],
[-1.8675729833512822, 0.70635912899841369]], [[-7.7039758773277285, 0.010373740377476182], [-4.7068727743996774,
-6.0772875420935044]], [[-3.0685116965240766, -0.087895442795001166], [-1.525970685805659,
-7.523003338726328]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-1.1853868782160886, 2.5351530537252165], [-1.2837437078975515,
3.1312402808073667]], [[-4.1641181691092886, 2.9189046285084062], [-1.3185973108411009, -3.7938696140369661]],
[[-0.89271704463249257, 3.1307387318474276], [1.8806398676914435, 3.2390090856573281]], [[2.9858014300179012,
-0.15428183452725364], [0.86486523564035522, 3.4387973479900511]], [[-4.9715376583360911, 2.7428119593691136],
[-1.97443455540804, -3.344849323101867]], [[-0.33607347753243921, 2.6445427761966362], [1.2064675331859784,
-4.7905651197346906]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank0_Symbol_rank4(self):
arg0=Data(-0.215341183726,self.functionspace)
arg0.setTaggedValue(1,-3.01917111711)
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[3.1718058337950783, -4.3218518167555349, 4.7360170033398816, 2.6415781893387447],
[1.7953624357215787, 0.37239845986582054, 0.85595953231170441, -4.2093909477304852], [-4.0724848735753412,
-2.3789549933876364, 3.8266481046469991, -4.4686983670793881]], [[-1.3807814097985793, -0.9345570079736385,
3.2111606830229267, 2.5248569160832579], [-0.19847478717542089, 3.6200277417416071, -1.3367301493578787,
-1.9914051287776093], [4.2384277387383236, -3.1625190831895669, -4.8267032630177118, -3.7590986361039294]]],
[[[-0.96721285038350846, 0.23717549644533698, -2.0558971771798862, -2.1889488119398925], [2.1163450477817447,
-4.308535473047935, 0.96468545582662735, 0.58036767508710252], [-0.26889479983427034, -4.6749066439752021,
-2.6908936581627731, 3.3090528029139286]], [[1.0683391958055246, -4.3705975019062535, 4.6959723711804546,
-0.58815635047014858], [-1.7921642772643898, 2.8079866307247423, 4.5837878995413348, -3.6656523242301429],
[2.1083853748587442, -0.44280454111162726, -2.5427523262585563, 3.9551312168955626]]], [[[4.0479839543530591,
1.694708528108122, -1.8081650371476021, 2.5627212563151982], [2.9443513555348222, -3.4330381296191126,
-2.3471872352829837, 2.9291777099369405], [0.92208424820838264, -1.7857214370413055, 3.2638247404414695,
3.3713981402987798]], [[-2.3853121535462418, 2.1417428055374232, 3.1558224539661612, -4.4802179321245248],
[-3.0197245205703069, 2.7624146301708477, -4.6790033997765104, -4.0453165901737584], [4.8295161047601614,
-3.5764718373510842, 4.356981591617421, -4.7034098127513264]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[-3.3871470175211567, 4.1065106330294565, -4.95135818706596, -2.856919373064823],
[-2.0107036194476571, -0.5877396435918989, -1.0713007160377828, 3.9940497640044068], [3.8571436898492628,
2.163613809661558, -4.0419892883730775, 4.2533571833533097]], [[1.165440226072501, 0.71921582424756014,
-3.426501866749005, -2.7401980998093363], [-0.01686639655065747, -3.8353689254676855, 1.1213889656318003,
1.776063945051531], [-4.4537689224644019, 2.9471778994634885, 4.6113620792916334, 3.543757452377851]]],
[[[0.7518716666574301, -0.45251668017141533, 1.8405559934538078, 1.9736076282138142], [-2.3316862315078231,
4.0931942893218567, -1.1800266395527057, -0.79570885881318087], [0.053553616108191981, 4.4595654602491237,
2.4755524744366948, -3.5243939866400069]], [[-1.283680379531603, 4.1552563181801752, -4.911313554906533,
0.37281516674407023], [1.5768230935383114, -3.0233278144508207, -4.7991290832674132, 3.4503111405040645],
[-2.3237265585848226, 0.2274633573855489, 2.3274111425324779, -4.1704724006216409]]], [[[-4.2633251380791375,
-1.9100497118342004, 1.5928238534215238, -2.7780624400412766], [-3.1596925392609005, 3.2176969458930342,
2.1318460515569053, -3.1445188936630188], [-1.137425431934461, 1.5703802533152271, -3.4791659241675479,
-3.5867393240248582]], [[2.1699709698201635, -2.3570839892635016, -3.3711636376922396, 4.2648767483984464],
[2.8043833368442286, -2.977755813896926, 4.463662216050432, 3.8299754064476801], [-5.0448572884862397,
3.3611306536250058, -4.5723227753434994, 4.4880686290252481]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-6.1909769509085075, 1.3026806996421056, -7.7551881204533109,
-5.6607493064521739], [-4.8145335528350079, -3.3915695769792498, -3.8751306494251336, 1.1902198306170559],
[1.0533137564619119, -0.64021612372579284, -6.8458192217604283, 1.4495272499659588]], [[-1.6383897073148499,
-2.0846141091397907, -6.2303318001363559, -5.5440280331966871], [-2.8206963299380083, -6.6391988588550364,
-1.6824409677555505, -1.0277659883358199], [-7.2575988558517528, 0.14334796607613765, 1.8075321459042826,
0.73992751899050013]]], [[[-2.0519582667299208, -3.2563466135587662, -0.96327393993354304, -0.83022230517353668],
[-5.1355161648951739, 1.2893643559345058, -3.9838565729400566, -3.5995387922005317], [-2.7502763172791589,
1.6557355268617728, -0.32827745895065608, -6.3282239200273578]], [[-4.0875103129189538, 1.3514263847928243,
-7.7151434882938839, -2.4310147666432806], [-1.2270068398490395, -5.8271577478381715, -7.602959016654764,
0.64648120711671364], [-5.1275564919721734, -2.576366576001802, -0.47641879085487293, -6.9743023340089918]]],
[[[-7.0671550714664884, -4.7138796452215512, -1.2110060799658271, -5.5818923734286274], [-5.9635224726482514,
0.41386701250568336, -0.67198388183044555, -5.9483488270503697], [-3.9412553653218119, -1.2334496800721237,
-6.2829958575548988, -6.390569257412209]], [[-0.63385896356718741, -5.1609139226508525, -6.1749935710795905,
1.4610468150110956], [0.0005534034568777102, -5.7815857472842769, 1.6598322826630811, 1.0261454730603292],
[-7.8486872218735906, 0.55730072023765498, -7.3761527087308503, 1.6842386956378972]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank1_Symbol_rank0(self):
arg0=Data(numpy.array([3.3101673523710691, 0.048409361416743124]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([0.70887806236646611, -0.73932065177372408]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(1.15960287006)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([2.1505644823090515, -1.1111935086452744]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.45072480769555145, -1.8989235218357416]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank1_Symbol_rank1(self):
arg0=Data(numpy.array([-2.0708546339036071, 2.2714034647505121]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([-0.16265022615439584, -0.29272834777410406]))
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([1.8495632665872739, -2.2808524667130694])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([-3.920417900490881, 4.5522559314635815]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-2.0122134927416697, 1.9881241189389653]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank2_Symbol_rank0(self):
arg0=Data(numpy.array([[4.703380807076492, -4.2567944639019304, -2.0784707905046593, 0.18023637488621791,
1.1164321428411501], [3.3809585074696322, 1.5795463086222137, 1.5300027430790495, -1.6695215658775489,
-4.9671698822372887], [-0.56875186129757704, -0.88988163011215704, 1.0953422249288387, 1.2629450835517639,
1.9829321534877584], [-2.3470243950738103, -1.5345245349366401, 1.7913793425402638, 3.2778179482022125,
3.2743088989127749]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[2.1331140495285128, 4.902243346193929, -3.8569193535703947,
-1.2051025219030698, 4.8526791592750644], [-1.9285295160668192, -2.2715983725035862, -1.6280809153232632,
0.63571110979312273, -4.5616322454088643], [1.1933837591252878, -2.4657544917793928, 3.8511059475300904,
-3.0018611957635444, 3.560382804940847], [-4.284584247208282, -4.3366343606789348, 3.6048395763720524,
-2.2301793774115106, 4.6397261587379131]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0560012612314)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[4.6473795458450571, -4.3127957251333653, -2.1344720517360942, 0.12423511365478301,
1.0604308816097152], [3.3249572462381973, 1.5235450473907788, 1.4740014818476146, -1.7255228271089837,
-5.0231711434687236], [-0.62475312252901194, -0.94588289134359194, 1.0393409636974038, 1.206943822320329,
1.9269308922563235], [-2.4030256563052452, -1.590525796168075, 1.7353780813088289, 3.2218166869707776,
3.21830763768134]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[2.0771127882970779, 4.8462420849624941, -3.9129206148018296,
-1.2611037831345047, 4.7966778980436295], [-1.9845307772982541, -2.3275996337350211, -1.6840821765546981,
0.57970984856168783, -4.6176335066402991], [1.1373824978938529, -2.5217557530108277, 3.7951046862986555,
-3.0578624569949793, 3.5043815437094121], [-4.3405855084397169, -4.3926356219103697, 3.5488383151406175,
-2.2861806386429455, 4.5837248975064782]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank2_Symbol_rank2(self):
arg0=Data(numpy.array([[0.044613582775737015, -0.22965054883260905, -3.3954728255423361, -0.043404784226975579,
-0.81018025865095922], [4.0980455142640473, 3.3299876326958326, 4.4694158188546833, 0.047800124529065791,
-4.1128886475115927], [-0.86793714814288414, 3.7852706993586231, 2.8168181178475837, -2.6081900317073039,
1.795227525921204], [-2.7964436060814792, 2.46599228887926, -4.3894587372918519, -3.0809581135280197,
4.5629513161933648]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[0.18467263707487369, -2.906541382403959, -4.2471361917218733,
1.7478696798949915, -2.0555035204044225], [-4.1703824796767011, -0.58145273211245829, -1.3034416354534684,
-4.4238643252257699, -3.0019960418182654], [-0.011560599410600503, 4.5614736908410478, -4.1865499712522745,
0.41611035316936196, 1.4719370557053075], [3.3285499812876207, 4.2147545548351992, 3.8796865015190463,
-2.8665673368928459, 3.8754754018195001]]))
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-0.34040680852948757, 0.51480179015857086, 2.6579250902566542, -3.8908104282358877,
-1.0766494604779266], [-1.7785348143550985, 1.7875285221080928, -0.26464821727786259, 3.7856697734154743,
0.14935084548977784], [1.6454427368239299, -3.0878902261983701, 2.1577262475041596, -3.540342914142153,
2.8529020416879671], [2.8849125795379305, -3.1409630887157123, -0.30215664293811351, 3.5493007526176896,
0.27226779139430857]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[0.38502039130522459, -0.74445233899117991, -6.0533979157989908, 3.8474056440089122,
0.26646920182696743], [5.8765803286191458, 1.5424591105877399, 4.7340640361325459, -3.7378696488864085,
-4.2622394930013705], [-2.5133798849668141, 6.8731609255569932, 0.65909187034342409, 0.93215288243484906,
-1.0576745157667631], [-5.6813561856194097, 5.6069553775949723, -4.0873020943537384, -6.6302588661457094,
4.2906835247990562]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.52507944560436126, -3.4213431725625298, -6.9050612819785275,
5.6386801081308793, -0.97885405992649588], [-2.3918476653216025, -2.3689812542205511, -1.0387934181756058,
-8.2095340986412442, -3.1513468873080432], [-1.6570033362345304, 7.6493639170394179, -6.3442762187564341,
3.9564532673115149, -1.3809649859826596], [0.44363740174969024, 7.3557176435509115, 4.1818431444571598,
-6.4158680895105356, 3.6032076104251916]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank3_Symbol_rank0(self):
arg0=Data(numpy.array([[[-0.70323441272603926, -1.4205742401701604], [-3.6004008923276585, 4.1739347100888349]],
[[-2.7687391296703767, -0.96114141211843496], [0.45711266950319906, 0.36713165606152121]], [[3.8726070188081287,
2.6611494194452137], [-0.28060302358441547, 1.0399275995737964]], [[2.5912385881777, -0.12172669528696911],
[1.831517522951442, -4.9891623764024926]], [[3.8572507842255241, 2.9719918728052663], [0.42882676434271261,
-1.4826468418372341]], [[0.16110396579090835, 4.8052378752678955], [2.4890225545274554,
-1.4594734254395068]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[3.4601998637619467, 3.5105292543746671], [-1.9715134513187751,
1.6897677346566677]], [[0.99895689216195205, 3.7908023259957879], [-2.9811497902134496, 0.46336396583979944]],
[[-2.0979181014824011, 0.68992077008736707], [4.5817275596392033, 3.1112543881649586]], [[-1.0666850119171398,
-3.7136243224538679], [-2.1842168128700248, -0.60998709362389292]], [[-1.0817587775668578, 1.1357523207967555],
[0.72114300996433212, 2.0871085948686607]], [[2.6196090777455074, -4.8403131105182826], [4.4462612480444346,
2.6275786734235638]]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(3.40075496466)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-4.1039893773891789, -4.8213292048333001], [-7.0011558569907981, 0.77317974542569523]],
[[-6.1694940943335164, -4.3618963767815746], [-2.9436422951599406, -3.0336233086016184]], [[0.4718520541449891,
-0.73960554521792599], [-3.6813579882475551, -2.3608273650893432]], [[-0.80951637648543961, -3.5224816599501088],
[-1.5692374417116977, -8.3899173410656331]], [[0.4564958195623845, -0.42876309185787331], [-2.971928200320427,
-4.8834018065003733]], [[-3.2396509988722313, 1.4044829106047558], [-0.91173241013568429,
-4.8602283901026464]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.059444899098807014, 0.10977428971152747], [-5.3722684159819147,
-1.7109872300064719]], [[-2.4017980725011876, 0.39004736133264828], [-6.3819047548765893, -2.9373909988233402]],
[[-5.4986730661455407, -2.7108341945757726], [1.1809725949760637, -0.28950057649818106]], [[-4.4674399765802795,
-7.1143792871170071], [-5.5849717775331644, -4.0107420582870326]], [[-4.4825137422299974, -2.2650026438663842],
[-2.6796119546988075, -1.3136463697944789]], [[-0.7811458869176322, -8.2410680751814223], [1.0455062833812949,
-0.77317629123957587]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank3_Symbol_rank3(self):
arg0=Data(numpy.array([[[-2.8893927498914151, -3.9495986710021471], [2.0674301637688552, -4.9323681378020368]],
[[-3.9365223323164567, -3.9166796931279513], [-2.1295831296849688, 0.049270642730291137]], [[1.1604521699930164,
-4.7263968957110194], [0.18403419227820805, -3.9919770732677948]], [[-4.4683480884742268, 3.1077188243660192],
[0.090355977211302729, -0.013539049772621325]], [[1.2239143556433882, 4.66468811676115], [4.6443599318212119,
2.902664355759085]], [[3.1499666861977964, 3.5678517696258449], [0.73557701807290599,
-4.1703133219986768]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[0.62745401025262382, 0.69024538347902542], [4.3685303267738433,
2.2109723240557235]], [[-0.7348498808881363, -2.7513236139357309], [2.5887407011037489, 4.1931952710033542]],
[[2.1336250254996258, -2.1610465999144091], [-4.054796877122568, 0.054975312915938268]], [[2.8778982280083021,
0.031841424972327559], [-1.6040852288365626, -0.14653197703489251]], [[1.0241081083490533, 2.0236436389548764],
[-4.7683548819587331, 0.81201234013234735]], [[-3.2923450240347405, 2.2531528995219965], [-3.594199051432386,
-1.9523442452177875]]]))
arg1=Symbol(shape=(6, 2, 2))
res=arg0-arg1
s1=numpy.array([[[0.67454553417657603, 2.9833990689244789], [-3.9375622829117427, 0.0094498156860893801]],
[[2.1574617938010734, -0.48892733726965609], [0.62118276066421352, 0.99065918564407696]], [[1.7968244154456219,
-1.6314349433046926], [1.8612952961850224, 4.6630470176393288]], [[0.43763307675500052, 4.0271951272236688],
[-1.1711764825930993, -4.5547560714878275]], [[2.514477748308436, 3.7600620047710827], [1.5805136896170069,
2.4948517124974012]], [[-0.74781838229224817, -2.9876928953003903], [4.1339271192034222, 4.4719827170790509]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[-3.5639382840679912, -6.932997739926626], [6.004992446680598, -4.9418179534881261]],
[[-6.0939841261175296, -3.4277523558582952], [-2.7507658903491823, -0.94138854291378582]], [[-0.63637224545260551,
-3.0949619524063268], [-1.6772611039068144, -8.6550240909071228]], [[-4.9059811652292273, -0.91947630285764959],
[1.261532459804402, 4.5412170217152061]], [[-1.2905633926650477, 0.90462611199006737], [3.063846242204205,
0.40781264326168376]], [[3.8977850684900446, 6.5555446649262352], [-3.3983501011305162,
-8.6422960390777277]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.047091523923952217, -2.2931536854454535], [8.3060926096855852,
2.2015225083696341]], [[-2.8923116746892097, -2.2623962766660748], [1.9675579404395354, 3.2025360853592773]],
[[0.33680061005400397, -0.52961165660971643], [-5.9160921733075904, -4.6080717047233906]], [[2.4402651512533016,
-3.9953537022513412], [-0.43290874624346332, 4.4082240944529349]], [[-1.4903696399593827, -1.7364183658162062],
[-6.34886857157574, -1.6828393723650539]], [[-2.5445266417424923, 5.2408457948223868], [-7.7281261706358082,
-6.4243269622968384]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank4_Symbol_rank0(self):
arg0=Data(numpy.array([[[[3.1002455029763922, 2.6515488300516923, -0.77582358496211956, -3.4443694355246803],
[-2.6599091620789581, -0.70044327546902529, -4.3223485855396966, 4.9338402947088049], [-4.5546987200991147,
-4.159833516760548, -1.2113818643763619, 1.341501344402797]], [[-0.99132126989665803, -3.81966827017445,
-1.5631671743562592, -2.9170370396917167], [0.94015514336519956, -4.5328623228274036, 2.5469993786586862,
4.5298447080413311], [-1.8826808741220304, -0.21100480137345734, -1.7750931594239239, -3.5343470478632764]]],
[[[-3.4624410933639691, 3.7419877938482422, -4.1641241285521557, -2.8763768520849711], [4.3838179808162643,
-0.076650368742670949, -2.2790272387608601, 1.4407514353417152], [-0.58059366739859364, 3.0282179950037378,
4.3946428646333242, -3.9361840734571896]], [[-0.40769305246403231, -0.93123230765280152, -3.5500981163613665,
-1.4382421516555786], [0.18862577968690264, 3.8234595158976035, 1.2783334948832605, -0.84599833008897818],
[-1.5452449895609535, -2.1285283532469434, 2.9517034908101669, -1.043778516582341]]], [[[2.5188074736534176,
4.926760464276164, -1.2494158315784532, -4.1847607799981805], [1.764772573553314, 4.6090994448443769,
-3.7864884573437072, 2.5743244083963681], [-0.44624416686502322, -0.44288726525437028, -2.5180469174818598,
-4.8009656021603]], [[-1.0967276921708047, -1.5639987059537273, -3.3122649580537331, -3.947879272385495],
[4.1267460589959857, -4.5801997177900287, 0.85366271506547697, -3.5573421152778972], [-4.7127368302025108,
-4.5592524679039892, -1.8586387462495613, -3.2614675219884837]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[-3.6140016210408508, -4.1545999292001445, 4.9863169403898908,
-2.2007289242442383], [-2.3634275248295822, 1.4929955627211893, 1.1905831175627091, -3.1298255396253936],
[-0.78867439130174599, -2.5664248245819756, -1.882393556334109, -2.3300345925878529]], [[3.7578772846055983,
-1.9632657478837121, -1.3792653830852455, -0.23840250166856869], [-1.650781665029756, -3.2744446113480907,
-1.2541229166086589, -2.3471598629273149], [-1.939332795628903, 0.81542234976851624, 0.52422540705571663,
0.91808367692950554]]], [[[-3.0689349511345867, -4.8032602579819264, 3.769084882991141, -1.5864959564378189],
[-3.2063200431555905, -0.3347729502698602, 1.763270929850381, 0.65936335478094321], [-3.6143633139881959,
0.15424644431103118, 3.7156782910709154, -3.2826914978804203]], [[-0.091940996157960697, 2.5331247115220021,
3.4383904670893202, 0.77887041122794898], [4.2850997491436988, 3.3877021574758341, 3.9303516193668084,
0.97217787674818279], [-1.8219977615256742, 3.7582967180633755, -3.967674705101544, 3.2183851949652524]]],
[[[3.8000102844693906, -2.9266220460152672, 0.11901081743168795, -0.70455205529677301], [4.6787843021952913,
-3.2637583894745239, 4.6693989140352041, 2.042172937625808], [-2.9445501417858964, 0.36254085518902812,
2.8333171427728354, -2.7757509476245721]], [[3.8180860212706147, -3.4817247466262815, -3.2683613783585006,
-2.0706219843820262], [4.8065072235822566, 2.2788211866672707, 3.8562835841415382, -1.1633706258500731],
[2.652336823163191, -2.6060953909144513, 0.62089818312127321, -1.6242126976534612]]]]))
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(-4.55573857649)
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[7.6559840794689205, 7.2072874065442205, 3.7799149915304087, 1.1113691409678479],
[1.8958294144135701, 3.8552953010235029, 0.23338999095283164, 9.4895788712013331], [0.0010398563934135296,
0.3959050597319802, 3.3443567121161664, 5.8972399208953252]], [[3.5644173065958702, 0.73607030631807824,
2.992571402136269, 1.6387015368008115], [5.4958937198577278, 0.02287625366512458, 7.1027379551512144,
9.0855832845338593], [2.6730577023704978, 4.3447337751190709, 2.7806454170686044, 1.0213915286292519]]],
[[[1.0932974831285591, 8.2977263703407704, 0.39161444794037248, 1.6793617244075572], [8.9395565573087925,
4.4790882077498573, 2.2767113377316681, 5.9964900118342435], [3.9751449090939346, 7.583956571496266,
8.9503814411258524, 0.61955450303533866]], [[4.1480455240284959, 3.6245062688397267, 1.0056404601311617,
3.1174964248369497], [4.7443643561794309, 8.3791980923901317, 5.8340720713757888, 3.70974024640355],
[3.0104935869315748, 2.4272102232455848, 7.5074420673026951, 3.5119600599101872]]], [[[7.0745460501459458,
9.4824990407686922, 3.3063227449140751, 0.3709777964943477], [6.3205111500458422, 9.1648380213369052,
0.76925011914882102, 7.1300629848888963], [4.109494409627505, 4.1128513112381579, 2.0376916590106684,
-0.24522702566777177]], [[3.4590108843217235, 2.991739870538801, 1.2434736184387951, 0.60785930410703326],
[8.6824846354885139, -0.024461141297500433, 5.4094012915580052, 0.99839646121463099], [-0.15699825370998255,
-0.0035138914114609676, 2.697099830242967, 1.2942710545040446]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.94173695545167746, 0.40113864729238369, 9.542055516882419,
2.35500965224829], [2.192311051662946, 6.0487341392137175, 5.7463216940552373, 1.4259130368671347],
[3.7670641851907822, 1.9893137519105526, 2.6733450201584192, 2.2257039839046753]], [[8.3136158610981266,
2.5924728286088161, 3.1764731934072827, 4.3173360748239595], [2.9049569114627722, 1.2812939651444375,
3.3016156598838693, 2.2085787135652133], [2.6164057808636252, 5.3711609262610445, 5.0799639835482449,
5.4738222534220338]]], [[[1.4868036253579415, -0.24752168148939813, 8.3248234594836692, 2.9692426200547093],
[1.3494185333369377, 4.220965626222668, 6.3190095063429093, 5.2151019312734714], [0.94137526250433234,
4.7099850208035594, 8.2714168675634436, 1.273047078612108]], [[4.4637975803345675, 7.0888632880145304,
7.9941290435818484, 5.3346089877204772], [8.8408383256362271, 7.9434407339683624, 8.4860901958593367,
5.527916453240711], [2.7337408149668541, 8.3140352945559037, 0.58806387139098426, 7.7741237714577807]]],
[[[8.3557488609619188, 1.629116530477261, 4.6747493939242162, 3.8511865211957552], [9.2345228786878195,
1.2919801870180043, 9.2251374905277324, 6.5979115141183362], [1.6111884347066319, 4.9182794316815563,
7.3890557192653636, 1.7799876288679561]], [[8.3738245977631429, 1.0740138298662467, 1.2873771981340276,
2.4851165921105021], [9.3622458000747848, 6.834559763159799, 8.4120221606340664, 3.3923679506424551],
[7.2080753996557192, 1.9496431855780769, 5.1766367596138014, 2.931525878839067]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_taggedData_rank4_Symbol_rank4(self):
arg0=Data(numpy.array([[[[2.1869721643026576, 0.35542091272423715, 2.5099944114031967, 4.7276012581949995],
[-0.23596027111215712, 3.2557128306673206, -2.4174678213407566, 4.9025765849007588], [3.4987602616867228,
-2.3969967727517094, 2.614715035832643, -3.9538109091356577]], [[0.54151166641114745, 4.3433313907072311,
-3.9824411189395126, 0.11193040884063787], [-4.3326960505433521, -2.6555021449849603, -1.6650005107909016,
-0.21278258756168267], [2.9438726263016104, 4.614591333740627, -1.4283352855346321, 4.195747529596801]]],
[[[0.4129039465707498, 0.25218586208094607, 4.2227877593235625, -3.8395686827717723], [-4.246422814789943,
-4.2708029152046789, -4.4791253262093615, 2.3703854064691221], [-0.32074671911367325, -4.0633264555676574,
-4.8034904727622223, 0.101245496731595]], [[3.3860052077100544, 4.4048456672981686, 3.3258905421337257,
-0.60591078242426555], [2.9574702297232829, 2.9390786518156196, 3.0627580449874809, -2.1902821038190523],
[1.2765769390449559, 4.5442832941192819, 0.47031486471564055, -3.2094801674304509]]], [[[1.4972627407797212,
-2.7514173987810633, 0.19744444113354387, 1.3720920976100972], [-3.147124860705004, -3.6707691951555885,
1.1521564952279704, -0.12493802519996233], [1.3717811158015873, -1.737983464544548, -2.5919544001996897,
-4.4195022009129206]], [[-3.5078213357756582, 1.5909514876001909, 3.932618549290213, 0.32844467348406869],
[-0.037083415286228494, 2.358949404615915, -3.7082781631298478, -4.9441324919087766], [1.219588665287433,
-2.1155364750524797, 2.3443039764677165, 4.1618790582351313]]]]),self.functionspace)
arg0.setTaggedValue(1,numpy.array([[[[3.8216987557975131, -0.59039813916696193, -1.9474433412604117,
4.1666345075852202], [1.0033840403657788, -1.8365638623400207, -1.1472895447555285, 0.49043998461267968],
[1.525782098623524, 0.98710575843395354, 1.9521603305269073, 1.4982217977497818]], [[4.8105014981222372,
0.18255767851204219, 0.10092997041413909, 2.3610713615733667], [3.8639541584797801, 1.8455276769077198,
3.9278199867001007, 2.5501176762845867], [3.2925051662999447, 0.78129602184334157, -0.73105877010655362,
2.9378923845982694]]], [[[1.3162347911484948, -1.7534583809398363, -4.4745574675152744, 0.84388146264593455],
[-2.1398633576757309, 1.6224556269216279, 4.0151064679341637, 0.81646760002277574], [0.95506629968888479,
-3.384786519820715, 2.08961451298733, 1.4802214615087061]], [[2.5752388025402837, -2.7094797245847468,
-2.6808155024703106, -1.7780191613070642], [-0.58755728186204248, -4.3097624692690948, 3.6757907841395685,
-1.8312242243207608], [-3.7229135985460826, -1.5786991892133564, 2.6894504757052617, -0.48567336902160463]]],
[[[3.4562176552233623, -1.5291903913231595, 4.9276217294297595, -1.4641622460496571], [-3.9633150641051529,
-1.3895475276782743, -2.0928641563143735, 4.286214622292805], [-0.016872120519226819, -0.86571000346058913,
4.2635805792181465, 4.0351866281897113]], [[-1.973695982407413, -4.452260246087465, -2.5681734906597109,
3.0954829513656215], [2.6526834215550927, -4.3976717675273207, 2.0111485813735106, 2.7969396373439324],
[-0.72100288848623784, 1.4868693846138363, 2.3876845459322045, -3.759851286518614]]]]))
arg1=Symbol(shape=(3, 2, 3, 4))
res=arg0-arg1
s1=numpy.array([[[[-1.2326165508314046, 0.019536700697927678, 3.3313535404093759, -2.4782775769684271],
[3.9342491756801525, 1.2904741959913864, -2.7701975380199206, 2.4757520771582744], [2.5202328466158281,
-1.3683915774027189, 3.4678638218372768, -2.2884507446983129]], [[-4.9275394706777931, 4.7975831194456333,
1.7829898690658723, -0.96339421834763073], [-2.7923805247323799, -0.026981154987572253, 2.5136604629187271,
0.14658337947380495], [1.1254475424349959, 4.8000437885357261, 3.3479331374253167, 1.6298765760037002]]],
[[[-0.46473842692243572, 1.2430212762010644, -0.23618382206216726, -1.2230171932711418], [2.0127498669810855,
-0.31475870950595031, -0.20645609212011973, -4.9825089187683691], [-4.6108703987985988, -0.47963035537661725,
-3.1919702863790422, -3.9993603357626117]], [[3.8402219409685951, 3.04406815317755, 4.7640360318949195,
1.5279973254325983], [-4.9716807317737235, -3.4706635767559693, -1.2581696190523903, -2.591452040312936],
[1.6191001515432157, -3.5419762128533741, 0.92904425652178801, 4.6966930122512043]]], [[[-2.4787875268428614,
4.8717538415307775, 3.6264063974305554, 2.0645154974740256], [-4.5070489852671329, 2.3540394703493703,
3.2007816723140134, -0.44359603196672026], [2.5406621078154732, 3.6651768892659895, -2.7039262200534422,
-1.9309627063916244]], [[-0.037762488646412962, -4.6825147640959859, -3.1180187992817956, -0.3407644296025687],
[-1.6601757648009907, -1.0174825465103088, 0.060955158106047236, 1.2341204474061849], [-0.24621306712976931,
-1.3620636349151272, -0.12322079758969373, 2.3717593913603183]]]])
sub=res.substitute({arg1:s1})
ref=Data(numpy.array([[[[3.4195887151340623, 0.33588421202630947, -0.82135912900617924, 7.2058788351634266],
[-4.1702094467923096, 1.9652386346759343, 0.35272971667916408, 2.4268245077424844], [0.97852741507089469,
-1.0286051953489905, -0.85314878600463384, -1.6653601644373448]], [[5.4690511370889405, -0.45425172873840225,
-5.7654309880053844, 1.0753246271882686], [-1.5403155258109722, -2.628520989997388, -4.1786609737096292,
-0.35936596703548762], [1.8184250838666145, -0.18545245479509909, -4.7762684229599488, 2.5658709535931008]]],
[[[0.87764237349318552, -0.99083541412011833, 4.4589715813857298, -2.6165514895006305], [-6.2591726817710285,
-3.9560442056987286, -4.2726692340892418, 7.3528943252374912], [4.2901236796849256, -3.5836961001910401,
-1.6115201863831801, 4.1006058324942067]], [[-0.45421673325854073, 1.3607775141206186, -1.4381454897611938,
-2.1339081078568638], [7.9291509614970064, 6.4097422285715888, 4.3209276640398713, 0.40116993649388366],
[-0.34252321249825979, 8.0862595069726559, -0.45872939180614747, -7.9061731796816552]]], [[[3.9760502676225826,
-7.6231712403118408, -3.4289619562970115, -0.69242339986392842], [1.359924124562129, -6.0248086655049589,
-2.0486251770860431, 0.31865800676675793], [-1.1688809920138858, -5.4031603538105379, 0.11197181985375249,
-2.4885394945212962]], [[-3.4700588471292453, 6.2734662516961768, 7.0506373485720086, 0.66920910308663739],
[1.6230923495147622, 3.3764319511262237, -3.7692333212358951, -6.1782529393149614], [1.4658017324172024,
-0.7534728401373525, 2.4675247740574102, 1.7901196668748129]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[5.0543153066289177, -0.60993483986488961, -5.2787968816697877,
6.6449120845536473], [-2.9308651353143738, -3.127038058331407, 1.6229079932643922, -1.9853120925455947],
[-0.99445074799230415, 2.3554973358366724, -1.5157034913103695, 3.7866725424480947]], [[9.7380409688000302,
-4.6150254409335911, -1.6820598986517332, 3.3244655799209974], [6.6563346832121599, 1.872508831895292,
1.4141595237813736, 2.4035342968107818], [2.1670576238649488, -4.0187477666923845, -4.0789919075318704,
1.3080158085945692]]], [[[1.7809732180709306, -2.9964796571409007, -4.2383736454531071, 2.0668986559170763],
[-4.1526132246568164, 1.9372143364275782, 4.2215625600542834, 5.7989765187911448], [5.5659366984874836,
-2.9051561644440977, 5.2815847993663727, 5.4795817972713179]], [[-1.2649831384283114, -5.7535478777622968,
-7.4448515343652302, -3.3060164867396624], [4.384123449911681, -0.83909889251312553, 4.9339604031919588,
0.76022781599217515], [-5.3420137500892988, 1.9632770236400177, 1.7604062191834737, -5.1823663812728089]]],
[[[5.9350051820662237, -6.400944232853937, 1.3012153319992041, -3.5286777435236827], [0.54373392116198005,
-3.7435869980276446, -5.293645828628387, 4.7298106542595253], [-2.5575342283347, -4.5308868927265786,
6.9675067992715887, 5.9661493345813357]], [[-1.935933493761, 0.23025451800852093, 0.54984530862208469,
3.4362473809681902], [4.3128591863560839, -3.3801892210170119, 1.9501934232674634, 1.5628191899377475],
[-0.47478982135646852, 2.8489330195289635, 2.5109053435218982, -6.1316106778789319]]]]))
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
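# expandedData variant: arg0 is expanded over the function space with a
# positional mask (whereNegative(x[0]-0.5)), taking one value where x[0] < 0.5
# and another elsewhere; the reference is assembled with its own mask
# (1.-whereZero(x[0])) before the tolerance check.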
def test_sub_overloaded_expandedData_rank0_Symbol_rank0(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.42413566075)+(1.-msk_arg0)*(2.73592046896)
arg1=Symbol(shape=())
res=arg0-arg1
s1=numpy.array(0.0730314190245)
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*(2.35110424173)+(1.-msk_ref)*(2.66288904994)
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank1(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(-2.38585027921)+(1.-msk_arg0)*(-2.14546935212)
arg1=Symbol(shape=(2,))
res=arg0-arg1
s1=numpy.array([1.0449404678521192, -2.9654578889240057])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([-3.4307907470591283,
0.57960760971699665])+(1.-msk_ref)*numpy.array([-3.1904098199744872, 0.81998853680163775])
self.assertTrue(isinstance(res,Symbol),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(sub-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sub_overloaded_expandedData_rank0_Symbol_rank2(self):
msk_arg0=whereNegative(self.functionspace.getX()[0]-0.5)
arg0=msk_arg0*(2.15276640076)+(1.-msk_arg0)*(-2.04284766814)
arg1=Symbol(shape=(4, 5))
res=arg0-arg1
s1=numpy.array([[-2.5429314638433684, 2.0318827224945402, -2.3636856893688076, 3.4855417570765717,
0.44952339669472341], [2.5403509140391156, 2.3524971436536095, 3.9461465487262188, 2.6955339698780154,
-0.45702899742654868], [-1.0602022717036155, 0.74771157767510843, 1.6452939357358289, -3.0322095528230921,
1.6787335078454735], [-4.263078102519902, 3.2046384335109863, 4.0147512257312048, 3.3998288702285713,
-0.56118778404289138]])
sub=res.substitute({arg1:s1})
msk_ref=1.-whereZero(self.functionspace.getX()[0],1.e-8)
ref=msk_ref*numpy.array([[4.6956978646047602, 0.12088367826685165, 4.5164520901301994, -1.3327753563151798,
1.7032430040666684], [-0.38758451327772381, -0.19973074289221771, -1.793380147964827, -0.5427675691166236,
2.6097953981879405], [3.2129686724650073, 1.4050548230862834, 0.50747246502556287, 5.1849759535844839,
0.47403289291591832], [6.4158445032812939, -1.0518720327495945, -1.861984824969813, -1.2470624694671795,
2.7139541848042832]])+(1.-msk_ref)*numpy.array([[0.50008379570506278, -4.0747303906328458, 0.32083802123050198,
-5.5283894252148773, -2.4923710648330291], [-4.5831985821774213, -4.3953448117919152, -5.9889942168645245,
-4.7383816380163211, -1.585818670711757], [-0.98264539643469018, -2.7905592458134141, -3.6881416038741346,
0.98936188468478647, -3.7215811759837791], [2.2202304343815964, -5.247486101649292, -6.0575988938695104,
-5.4426765383668769, -1.4816598840954143]])
from genotypes import PRIMITIVES, PRIMITIVES_DARTS, Genotype_opt, Genotype_nested, ResNet18, Xception, residual_layer_simple, ResNet50
import pandas as pd
import numpy as np
from scipy.spatial.distance import hamming
import plotly.express as px
import json
from train import TrainArgs, TrainNetwork
from scipy.stats import describe
import time
def hausdorff_metric(u, v, seed=0):
'''
Turns Hausdorff distance into a metric by enforcing symmetry.
'''
return max(global_hausdorff_distance(u, v, seed), global_hausdorff_distance(v, u, seed))
def cell_hausdorff_distance(c1, c2, seed=0, stats_file_path="op_stats.json"):
'''
    Computes the Hausdorff distance between two cells, using operation performance stats as weights in a Hamming distance rather than the standard Euclidean distance.
'''
with open(stats_file_path) as f:
op_stats = np.array(list(json.load(f).values()))
cmax = cmin = d = 0
N1 = c1.shape[0]
N2 = c2.shape[0]
i_store = j_store = i_ret = j_ret = 0
# shuffling the points in each array generally increases the likelihood of
# an advantageous break in the inner search loop and never decreases the
# performance of the algorithm
rng = np.random.RandomState(seed)
resort1 = np.arange(N1, dtype=np.int64)
resort2 = np.arange(N2, dtype=np.int64)
rng.shuffle(resort1)
rng.shuffle(resort2)
ar1 = np.asarray(c1)[resort1]
ar2 = np.asarray(c2)[resort2]
cmax = 0
for i in range(N1):
cmin = np.inf
for j in range(N2):
d = hamming(ar1[i], ar2[j], w=op_stats)
if d < cmax: # break out of `for j` loop
break
if d < cmin: # always true on first iteration of for-j loop
cmin = d
i_store = i
j_store = j
# always true on first iteration of for-j loop, after that only
# if d >= cmax
if cmin >= cmax and d >= cmax:
cmax = cmin
i_ret = i_store
j_ret = j_store
return cmax
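# Illustrative sketch (not part of the original module): each cell is assumed
# to be a binary matrix whose rows are one-hot operation encodings, and
# `op_stats.json` is assumed to exist and map every primitive to a scalar
# weight. Taking the max of both directed distances, as in `hausdorff_metric`,
# restores symmetry.
def _demo_cell_hausdorff():
    c1 = np.array([[1, 0, 0], [0, 1, 0]])  # hypothetical 2-row cells over 3 ops
    c2 = np.array([[0, 1, 0], [0, 0, 1]])
    d12 = cell_hausdorff_distance(c1, c2, seed=0)
    d21 = cell_hausdorff_distance(c2, c1, seed=0)
    return max(d12, d21)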
def deserialize_architecture_to_alphas(genotype, parsing_method="threshold"):
'''
Deserialize an architecture from a genotype to alphas weights.
'''
prims = PRIMITIVES if isinstance(genotype, Genotype_opt) else PRIMITIVES_DARTS
if parsing_method != "threshold":
raise "Only threshold parsing method is supported for now."
steps = genotype.concat[-1] - 1
k = sum(1 for i in range(steps) for n in range(i+2))
alphas = np.zeros((len(genotype.genes), k, len(prims)))
for i, cell in enumerate(genotype.genes):
for op, to, f in cell:
offset = to - 2
pos = sum(1 for i in range(offset) for n in range(i+2))
alphas[i][pos+f][prims.index(op)] = 10.0
return alphas
def show_genotype_stats(g, save_path):
'''
    Collect the statistical dispersion of operations in a genotype; the pie-chart export to save_path is currently commented out.
'''
prims = PRIMITIVES if isinstance(g, Genotype_opt) else PRIMITIVES_DARTS
glob_stats = {p: 0 for p in prims}
cell_stats = []
for i, c in enumerate(g.genes):
stats = {p: 0 for p in prims}
for op in c:
stats[op[0]] += 1
glob_stats[op[0]] += 1
cell_stats.append(stats)
#fig = go.Figure(data=[go.Pie(labels=list(glob_stats.keys()), values=list(glob_stats.values()))])
#fig.write_image(save_path)
def architectural_distance_metric(g1: Genotype_nested, g2: Genotype_nested, save_path: str = None):
a1 = deserialize_architecture_to_alphas(g1)
a2 = deserialize_architecture_to_alphas(g2)
    min_shape, max_shape = np.sort([a1.shape[0], a2.shape[0]])
# Copyright 2019 <NAME> & <NAME>
#
# This file is part of OBStools.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
:mod:`~obstools.atacr.utils` contains several functions that are used in the
class methods of `~obstools.atacr.classes`.
"""
import os
import math
import numpy as np
import fnmatch
from matplotlib import pyplot as plt
from obspy.core import read, Stream, Trace, AttribDict, UTCDateTime
def traceshift(trace, tt):
"""
Function to shift traces in time given travel time
Parameters
----------
trace : :class:`~obspy.core.Trace` object
Trace object to update
tt : float
Time shift in seconds
Returns
-------
rtrace : :class:`~obspy.core.Trace` object
Updated trace object
"""
# Define frequencies
nt = trace.stats.npts
dt = trace.stats.delta
freq = np.fft.fftfreq(nt, d=dt)
# Fourier transform
ftrace = np.fft.fft(trace.data)
# Shift
for i in range(len(freq)):
ftrace[i] = ftrace[i]*np.exp(-2.*np.pi*1j*freq[i]*tt)
# Back Fourier transform and return as trace
rtrace = trace.copy()
rtrace.data = np.real(np.fft.ifft(ftrace))
# Update start time
rtrace.stats.starttime -= tt
return rtrace
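# Illustrative sketch (not part of the original module): the exp(-2*pi*i*f*tt)
# factor above acts as a circular time shift for band-limited data.
def _demo_traceshift():
    """Shift a synthetic 100 Hz, 5 Hz-sine trace by 0.25 s."""
    tr = Trace(data=np.sin(2.*np.pi*5.*np.arange(1000)/100.))
    tr.stats.delta = 0.01
    return traceshift(tr, 0.25)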
def QC_streams(start, end, st):
"""
Function for quality control of traces, which compares the
    start and end times that were requested, as well as the total
    length of the traces.
Parameters
----------
start : :class:`~obspy.core.UTCDateTime` object
Start time of requested stream
end : :class:`~obspy.core.UTCDateTime` object
End time of requested stream
st : :class:`~obspy.core.Stream` object
Stream object with all trace data
Returns
-------
(pass): bool
Whether the QC test has passed
st : :class:`~obspy.core.Stream` object
Updated stream object
"""
# Check start times
if not np.all([tr.stats.starttime == start for tr in st]):
print("* Start times are not all close to true start: ")
[print("* "+tr.stats.channel+" " +
str(tr.stats.starttime)+" " +
str(tr.stats.endtime)) for tr in st]
print("* True start: "+str(start))
print("* -> Shifting traces to true start")
delay = [tr.stats.starttime - start for tr in st]
st_shifted = Stream(
traces=[traceshift(tr, dt) for tr, dt in zip(st, delay)])
st = st_shifted.copy()
# Try trimming
dt = st[0].stats.delta
try:
st.trim(start, end-dt, fill_value=0., pad=True)
except Exception:
print("* Unable to trim")
print("* -> Skipping")
print("**************************************************")
return False, None
# Check final lengths - they should all be equal if start times
# and sampling rates are all equal and traces have been trimmed
sr = st[0].stats.sampling_rate
if not np.allclose([tr.stats.npts for tr in st[1:]], st[0].stats.npts):
print("* Lengths are incompatible: ")
[print("* "+str(tr.stats.npts)) for tr in st]
print("* -> Skipping")
print("**************************************************")
return False, None
elif not np.allclose([st[0].stats.npts], int((end - start)*sr), atol=1):
print("* Length is too short: ")
print("* "+str(st[0].stats.npts) +
" ~= "+str(int((end - start)*sr)))
print("* -> Skipping")
print("**************************************************")
return False, None
else:
return True, st
def update_stats(tr, stla, stlo, stel, cha, evla=None, evlo=None):
"""
Function to include SAC metadata to :class:`~obspy.core.Trace` objects
Parameters
----------
tr : :class:`~obspy.core.Trace` object
Trace object to update
stla : float
Latitude of station
stlo : float
Longitude of station
stel : float
Station elevation (m)
cha : str
Channel for component
evla : float, optional
Latitude of event
evlo : float, optional
        Longitude of event
Returns
-------
tr : :class:`~obspy.core.Trace` object
Updated trace object
"""
tr.stats.sac = AttribDict()
tr.stats.sac.stla = stla
tr.stats.sac.stlo = stlo
tr.stats.sac.stel = stel
tr.stats.sac.kcmpnm = cha
tr.stats.channel = cha
if evla is not None and evlo is not None:
tr.stats.sac.evla = evla
tr.stats.sac.evlo = evlo
return tr
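# Illustrative sketch (not part of the original module): attach SAC station
# metadata to a bare Trace; the coordinates below are placeholders.
def _demo_update_stats():
    tr = Trace(data=np.zeros(10))
    return update_stats(tr, stla=45.0, stlo=-130.0, stel=-2000.0, cha='HHZ')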
def get_data(datapath, tstart, tend):
"""
Function to grab all available noise data given a path and data time range
Parameters
----------
datapath : str
Path to noise data folder
tstart : :class:`~obspy.class.UTCDateTime`
Start time for query
tend : :class:`~obspy.class.UTCDateTime`
End time for query
Returns
-------
tr1, tr2, trZ, trP : :class:`~obspy.core.Trace` object
Corresponding trace objects for components H1, H2, HZ and HP. Returns
empty traces for missing components.
"""
# Define empty streams
trN1 = Stream()
trN2 = Stream()
trNZ = Stream()
trNP = Stream()
# Time iterator
t1 = tstart
# Cycle through each day within time range
while t1 < tend:
# Time stamp used in file name
tstamp = str(t1.year).zfill(4)+'.'+str(t1.julday).zfill(3)+'.'
# Cycle through directory and load files
p = datapath.glob('*.*')
files = [x for x in p if x.is_file()]
for file in files:
if fnmatch.fnmatch(str(file), '*' + tstamp + '*1.SAC'):
tr = read(str(file))
trN1.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*2.SAC'):
tr = read(str(file))
trN2.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*Z.SAC'):
tr = read(str(file))
trNZ.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*H.SAC'):
tr = read(str(file))
trNP.append(tr[0])
# Increase increment
t1 += 3600.*24.
# Fill with empty traces if components are not found
ntr = len(trNZ)
if not trN1 and not trN2:
for i in range(ntr):
trN1.append(Trace())
trN2.append(Trace())
if not trNP:
for i in range(ntr):
trNP.append(Trace())
if ntr > 0:
# Check that all sampling rates are equal - otherwise resample
if trNZ[0].stats.sampling_rate != trNP[0].stats.sampling_rate:
# These checks assume that all seismic data have the same sampling
if trNZ[0].stats.sampling_rate < trNP[0].stats.sampling_rate:
trNP.resample(trNZ[0].stats.sampling_rate, no_filter=False)
else:
trNZ.resample(trNP[0].stats.sampling_rate, no_filter=False)
if trN1:
trN1.resample(trNP[0].stats.sampling_rate, no_filter=False)
if trN2:
trN2.resample(trNP[0].stats.sampling_rate, no_filter=False)
return trN1, trN2, trNZ, trNP
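# Illustrative note (not part of the original module): files are matched on a
# 'YYYY.JJJ.' timestamp, so a (hypothetical) '7D.M08A.2012.097.1.SAC' would be
# picked up as the H1 component for julian day 097 of 2012.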
def get_event(eventpath, tstart, tend):
"""
Function to grab all available earthquake data given a path and data time
range
Parameters
----------
eventpath : str
Path to earthquake data folder
tstart : :class:`~obspy.class.UTCDateTime`
Start time for query
tend : :class:`~obspy.class.UTCDateTime`
End time for query
Returns
-------
tr1, tr2, trZ, trP : :class:`~obspy.core.Trace` object
Corresponding trace objects for components H1, H2, HZ and HP. Returns
empty traces for missing components.
"""
# Find out how many events from Z.SAC files
eventfiles = list(eventpath.glob('*Z.SAC'))
if not eventfiles:
        raise Exception("No event found in folder "+str(eventpath))
# Extract events from time stamps
prefix = [file.name.split('.') for file in eventfiles]
evstamp = [p[0]+'.'+p[1]+'.'+p[2]+'.'+p[3]+'.' for p in prefix]
evDateTime = [UTCDateTime(p[0]+'-'+p[1]+'T'+p[2]+":"+p[3]) for p in prefix]
# Define empty streams
tr1 = Stream()
tr2 = Stream()
trZ = Stream()
trP = Stream()
# Cycle over all available files in time range
for event, tstamp in zip(evDateTime, evstamp):
if event >= tstart and event <= tend:
# Cycle through directory and load files
p = list(eventpath.glob('*.SAC'))
files = [x for x in p if x.is_file()]
for file in files:
if fnmatch.fnmatch(str(file), '*' + tstamp + '*1.SAC'):
tr = read(str(file))
tr1.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*2.SAC'):
tr = read(str(file))
tr2.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*Z.SAC'):
tr = read(str(file))
trZ.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*H.SAC'):
tr = read(str(file))
trP.append(tr[0])
# Fill with empty traces if components are not found
ntr = len(trZ)
if not tr1 and not tr2:
for i in range(ntr):
tr1.append(Trace())
tr2.append(Trace())
if not trP:
for i in range(ntr):
trP.append(Trace())
if ntr > 0:
# Check that all sampling rates are equal - otherwise resample
if trZ[0].stats.sampling_rate != trP[0].stats.sampling_rate:
# These checks assume that all seismic data have the same sampling
if trZ[0].stats.sampling_rate < trP[0].stats.sampling_rate:
trP.resample(trZ[0].stats.sampling_rate, no_filter=False)
else:
trZ.resample(trP[0].stats.sampling_rate, no_filter=False)
if tr1:
tr1.resample(trP[0].stats.sampling_rate, no_filter=False)
if tr2:
tr2.resample(trP[0].stats.sampling_rate, no_filter=False)
return tr1, tr2, trZ, trP
def calculate_tilt(ft1, ft2, ftZ, ftP, f, goodwins, tiltfreq=[0.005, 0.035]):
"""
Determines tilt direction from maximum coherence between rotated H1 and Z.
Parameters
----------
ft1, ft2, ftZ, ftP : :class:`~numpy.ndarray`
Fourier transform of corresponding H1, H2, HZ and HP components
f : :class:`~numpy.ndarray`
Frequency axis in Hz
goodwins : list
List of booleans representing whether a window is good (True) or not
(False). This attribute is returned from the method
:func:`~obstools.atacr.classes.DayNoise.QC_daily_spectra`
tiltfreq : list, optional
Two floats representing the frequency band at which the tilt is
calculated
Returns
-------
cHH, cHZ, cHP : :class:`~numpy.ndarray`
Arrays of power and cross-spectral density functions of components HH
(rotated H1 in direction of maximum tilt), HZ, and HP
coh : :class:`~numpy.ndarray`
Coherence value between rotated H and Z components, as a function of
directions (azimuths)
ph : :class:`~numpy.ndarray`
Phase value between rotated H and Z components, as a function of
directions (azimuths)
direc : :class:`~numpy.ndarray`
Array of directions (azimuths) considered
tilt : float
Direction (azimuth) of maximum coherence between rotated H1 and Z
coh_value : float
Coherence value at tilt direction
phase_value : float
Phase value at tilt direction
"""
direc = np.arange(0., 360., 10.)
coh = np.zeros(len(direc))
ph = np.zeros(len(direc))
cZZ = np.abs(np.mean(ftZ[goodwins, :] *
np.conj(ftZ[goodwins, :]), axis=0))[0:len(f)]
for i, d in enumerate(direc):
# Rotate horizontals
ftH = rotate_dir(ft1, ft2, d)
# Get transfer functions
cHH = np.abs(np.mean(ftH[goodwins, :] *
np.conj(ftH[goodwins, :]), axis=0))[0:len(f)]
        cHZ = np.mean(ftH[goodwins, :] *
                      np.conj(ftZ[goodwins, :]), axis=0)[0:len(f)]
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy as np
from scipy.stats import binned_statistic
import astropy.units as u
from astropy.coordinates import Angle
from scipy.stats import t as t_dist
def pspec(psd2, nbins=None, return_stddev=False, binsize=1.0,
logspacing=True, max_bin=None, min_bin=None, return_freqs=True,
theta_0=None, delta_theta=None, boot_iter=None,
mean_func=np.nanmean):
'''
Calculate the radial profile using scipy.stats.binned_statistic.
Parameters
----------
psd2 : np.ndarray
2D Spectral power density.
nbins : int, optional
Number of bins to use. If None, it is calculated based on the size
of the given arrays.
return_stddev : bool, optional
Return the standard deviations in each bin.
binsize : float, optional
Size of bins to be used. If logspacing is enabled, this will increase
the number of bins used by the inverse of the given binsize.
logspacing : bool, optional
        Use logarithmically spaced bins.
max_bin : float, optional
Give the maximum value to bin to.
min_bin : float, optional
Give the minimum value to bin to.
return_freqs : bool, optional
Return spatial frequencies.
theta_0 : `~astropy.units.Quantity`, optional
The center angle of the azimuthal mask. Must have angular units.
delta_theta : `~astropy.units.Quantity`, optional
The width of the azimuthal mask. This must be given when
a `theta_0` is given. Must have angular units.
boot_iter : int, optional
Number of bootstrap iterations for estimating the standard deviation
        in each bin. Requires `return_stddev=True`.
mean_func : function, optional
Define the function used to create the 1D power spectrum. The default
is `np.nanmean`.
Returns
-------
bins_cents : np.ndarray
Centre of the bins.
ps1D : np.ndarray
1D binned power spectrum.
ps1D_stddev : np.ndarray
Returned when return_stddev is enabled. Standard deviations
within each of the bins.
'''
yy, xx = make_radial_arrays(psd2.shape)
dists = np.sqrt(yy**2 + xx**2)
if theta_0 is not None:
if delta_theta is None:
raise ValueError("Must give delta_theta.")
theta_0 = theta_0.to(u.rad)
delta_theta = delta_theta.to(u.rad)
theta_limits = Angle([theta_0 - 0.5 * delta_theta,
theta_0 + 0.5 * delta_theta])
# Define theta array
thetas = Angle(np.arctan2(yy, xx) * u.rad)
# Wrap around pi
theta_limits = theta_limits.wrap_at(np.pi * u.rad)
if nbins is None:
nbins = int(np.round(dists.max() / binsize) + 1)
if return_freqs:
yy_freq, xx_freq = make_radial_freq_arrays(psd2.shape)
freqs_dist = np.sqrt(yy_freq**2 + xx_freq**2)
zero_freq_val = freqs_dist[np.nonzero(freqs_dist)].min() / 2.
freqs_dist[freqs_dist == 0] = zero_freq_val
if max_bin is None:
if return_freqs:
max_bin = 0.5
else:
max_bin = dists.max()
if min_bin is None:
if return_freqs:
min_bin = 1.0 / min(psd2.shape)
else:
min_bin = 0.5
if logspacing:
bins = np.logspace(np.log10(min_bin), np.log10(max_bin), nbins + 1)
else:
bins = np.linspace(min_bin, max_bin, nbins + 1)
if return_freqs:
dist_arr = freqs_dist
else:
dist_arr = dists
if theta_0 is not None:
if theta_limits[0] < theta_limits[1]:
azim_mask = np.logical_and(thetas >= theta_limits[0],
thetas <= theta_limits[1])
else:
azim_mask = np.logical_or(thetas >= theta_limits[0],
thetas <= theta_limits[1])
azim_mask = np.logical_or(azim_mask, azim_mask[::-1, ::-1])
# Fill in the middle angles
ny = np.floor(psd2.shape[0] / 2.).astype(int)
nx = np.floor(psd2.shape[1] / 2.).astype(int)
azim_mask[ny - 1:ny + 1, nx - 1:nx + 1] = True
else:
azim_mask = None
finite_mask = np.isfinite(psd2)
if azim_mask is not None:
finite_mask = np.logical_and(finite_mask, azim_mask)
ps1D, bin_edge, cts = binned_statistic(dist_arr[finite_mask].ravel(),
psd2[finite_mask].ravel(),
bins=bins,
statistic=mean_func)
bin_cents = (bin_edge[1:] + bin_edge[:-1]) / 2.
if not return_stddev:
if theta_0 is not None:
return bin_cents, ps1D, azim_mask
else:
return bin_cents, ps1D
else:
if boot_iter is None:
stat_func = lambda x: np.nanstd(x, ddof=1)
else:
from astropy.stats import bootstrap
stat_func = lambda data: np.mean(bootstrap(data, boot_iter,
bootfunc=np.std))
ps1D_stddev = binned_statistic(dist_arr[finite_mask].ravel(),
psd2[finite_mask].ravel(),
bins=bins,
statistic=stat_func)[0]
# We're dealing with variations in the number of samples for each bin.
# Add a correction based on the t distribution
bin_cts = binned_statistic(dist_arr[finite_mask].ravel(),
psd2[finite_mask].ravel(),
bins=bins,
statistic='count')[0]
# Two-tail CI for 85% (~1 sigma)
alpha = 1 - (0.15 / 2.)
# Correction factor to convert to the standard error
A = t_dist.ppf(alpha, bin_cts - 1) / np.sqrt(bin_cts)
# If the standard error is larger than the standard deviation,
# use it instead
ps1D_stddev[A > 1] *= A[A > 1]
# Mask out bins that have 1 or fewer points
mask = bin_cts <= 1
        ps1D_stddev[mask] = np.nan  # np.NaN was removed in NumPy 2.0
        ps1D[mask] = np.nan
# ps1D_stddev[ps1D_stddev == 0.] = np.NaN
if theta_0 is not None:
return bin_cents, ps1D, ps1D_stddev, azim_mask
else:
return bin_cents, ps1D, ps1D_stddev
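# Illustrative sketch (not part of the original module): radially average the
# shifted 2D power spectrum of a white-noise image; the resulting 1D spectrum
# should be roughly flat.
def _demo_pspec():
    rng = np.random.RandomState(0)
    img = rng.randn(64, 64)
    psd2 = np.abs(np.fft.fftshift(np.fft.fft2(img)))**2
    freqs, ps1D = pspec(psd2, logspacing=False)
    return freqs, ps1D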
def make_radial_arrays(shape, y_center=None, x_center=None):
if y_center is None:
y_center = np.floor(shape[0] / 2.).astype(int)
else:
y_center = int(y_center)
if x_center is None:
x_center = np.floor(shape[1] / 2.).astype(int)
else:
x_center = int(x_center)
    y = np.arange(-y_center, shape[0] - y_center)
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_almost_equal,
assert_array_equal, assert_allclose,
assert_array_less)
import pytest
from scipy.signal import resample as sp_resample, butter, freqz, sosfreqz
from mne import create_info, Epochs
from numpy.fft import fft, fftfreq
from mne.io import RawArray, read_raw_fif
from mne.io.pick import _DATA_CH_TYPES_SPLIT
from mne.filter import (filter_data, resample, _resample_stim_channels,
construct_iir_filter, notch_filter, detrend,
_overlap_add_filter, _smart_pad, design_mne_c_filter,
estimate_ringing_samples, create_filter,
_length_factors)
from mne.utils import sum_squared, catch_logging, requires_mne, run_subprocess
def test_filter_array():
"""Test filtering an array."""
for data in (np.zeros((11, 1, 10)), np.zeros((9, 1, 10))):
filter_data(data, 512., 8, 12, method='iir',
iir_params=dict(ftype='butterworth', order=2))
@requires_mne
def test_mne_c_design(tmp_path):
"""Test MNE-C filter design."""
tempdir = str(tmp_path)
temp_fname = op.join(tempdir, 'test_raw.fif')
out_fname = op.join(tempdir, 'test_c_raw.fif')
x = np.zeros((1, 10001))
x[0, 5000] = 1.
time_sl = slice(5000 - 4096, 5000 + 4097)
sfreq = 1000.
RawArray(x, create_info(1, sfreq, 'eeg')).save(temp_fname)
tols = dict(rtol=1e-4, atol=1e-4)
cmd = ('mne_process_raw', '--projoff', '--raw', temp_fname,
'--save', out_fname)
run_subprocess(cmd)
h = design_mne_c_filter(sfreq, None, 40)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
run_subprocess(cmd + ('--highpass', '5', '--highpassw', '2.5'))
h = design_mne_c_filter(sfreq, 5, 40, 2.5)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
run_subprocess(cmd + ('--lowpass', '1000', '--highpass', '10'))
h = design_mne_c_filter(sfreq, 10, None, verbose=True)
h_c = read_raw_fif(out_fname)[0][0][0][time_sl]
assert_allclose(h, h_c, **tols)
def test_estimate_ringing():
"""Test our ringing estimation function."""
# Actual values might differ based on system, so let's be approximate
for kind in ('ba', 'sos'):
for thresh, lims in ((0.1, (30, 60)), # 47
(0.01, (300, 600)), # 475
(0.001, (3000, 6000)), # 4758
(0.0001, (30000, 60000))): # 37993
n_ring = estimate_ringing_samples(butter(3, thresh, output=kind))
assert lims[0] <= n_ring <= lims[1], (
'%s %s: %s <= %s <= %s'
% (kind, thresh, lims[0], n_ring, lims[1]))
with pytest.warns(RuntimeWarning, match='properly estimate'):
assert estimate_ringing_samples(butter(4, 0.00001)) == 100000
def test_1d_filter():
"""Test our private overlap-add filtering function."""
# make some random signals and filters
rng = np.random.RandomState(0)
for n_signal in (1, 2, 3, 5, 10, 20, 40):
x = rng.randn(n_signal)
for n_filter in (1, 2, 3, 5, 10, 11, 20, 21, 40, 41, 100, 101):
for filter_type in ('identity', 'random'):
if filter_type == 'random':
h = rng.randn(n_filter)
else: # filter_type == 'identity'
h = np.concatenate([[1.], np.zeros(n_filter - 1)])
# ensure we pad the signal the same way for both filters
n_pad = n_filter - 1
x_pad = _smart_pad(x, (n_pad, n_pad))
for phase in ('zero', 'linear', 'zero-double'):
# compute our expected result the slow way
if phase == 'zero':
# only allow zero-phase for odd-length filters
if n_filter % 2 == 0:
pytest.raises(RuntimeError, _overlap_add_filter,
x[np.newaxis], h, phase=phase)
continue
shift = (len(h) - 1) // 2
x_expected = np.convolve(x_pad, h)
x_expected = x_expected[shift:len(x_expected) - shift]
elif phase == 'zero-double':
shift = len(h) - 1
x_expected = np.convolve(x_pad, h)
x_expected = np.convolve(x_expected[::-1], h)[::-1]
x_expected = x_expected[shift:len(x_expected) - shift]
shift = 0
else:
shift = 0
x_expected = np.convolve(x_pad, h)
x_expected = x_expected[:len(x_expected) - len(h) + 1]
# remove padding
if n_pad > 0:
x_expected = x_expected[n_pad:len(x_expected) - n_pad]
assert len(x_expected) == len(x)
# make sure we actually set things up reasonably
if filter_type == 'identity':
out = x_pad.copy()
out = out[shift + n_pad:]
out = out[:len(x)]
out = np.concatenate((out, np.zeros(max(len(x) -
len(out), 0))))
assert len(out) == len(x)
assert_allclose(out, x_expected)
assert len(x_expected) == len(x)
# compute our version
for n_fft in (None, 32, 128, 129, 1023, 1024, 1025, 2048):
# need to use .copy() b/c signal gets modified inplace
x_copy = x[np.newaxis, :].copy()
min_fft = 2 * n_filter - 1
if phase == 'zero-double':
min_fft = 2 * min_fft - 1
if n_fft is not None and n_fft < min_fft:
pytest.raises(ValueError, _overlap_add_filter,
x_copy, h, n_fft, phase=phase)
else:
x_filtered = _overlap_add_filter(
x_copy, h, n_fft, phase=phase)[0]
assert_allclose(x_filtered, x_expected, atol=1e-13)
def test_iir_stability():
"""Test IIR filter stability check."""
sig = np.random.RandomState(0).rand(1000)
sfreq = 1000
# This will make an unstable filter, should throw RuntimeError
pytest.raises(RuntimeError, filter_data, sig, sfreq, 0.6, None,
method='iir', iir_params=dict(ftype='butter', order=8,
output='ba'))
# This one should work just fine
filter_data(sig, sfreq, 0.6, None, method='iir',
iir_params=dict(ftype='butter', order=8, output='sos'))
# bad system type
pytest.raises(ValueError, filter_data, sig, sfreq, 0.6, None, method='iir',
iir_params=dict(ftype='butter', order=8, output='foo'))
# missing ftype
pytest.raises(RuntimeError, filter_data, sig, sfreq, 0.6, None,
method='iir', iir_params=dict(order=8, output='sos'))
# bad ftype
pytest.raises(RuntimeError, filter_data, sig, sfreq, 0.6, None,
method='iir',
iir_params=dict(order=8, ftype='foo', output='sos'))
# missing gstop
pytest.raises(RuntimeError, filter_data, sig, sfreq, 0.6, None,
method='iir', iir_params=dict(gpass=0.5, output='sos'))
# can't pass iir_params if method='fft'
pytest.raises(ValueError, filter_data, sig, sfreq, 0.1, None,
method='fft', iir_params=dict(ftype='butter', order=2,
output='sos'))
# method must be string
pytest.raises(TypeError, filter_data, sig, sfreq, 0.1, None,
method=1)
# unknown method
pytest.raises(ValueError, filter_data, sig, sfreq, 0.1, None,
method='blah')
# bad iir_params
pytest.raises(TypeError, filter_data, sig, sfreq, 0.1, None,
method='iir', iir_params='blah')
pytest.raises(ValueError, filter_data, sig, sfreq, 0.1, None,
method='fir', iir_params=dict())
# should pass because default trans_bandwidth is not relevant
iir_params = dict(ftype='butter', order=2, output='sos')
x_sos = filter_data(sig, 250, 0.5, None, method='iir',
iir_params=iir_params)
iir_params_sos = construct_iir_filter(iir_params, f_pass=0.5, sfreq=250,
btype='highpass')
x_sos_2 = filter_data(sig, 250, 0.5, None, method='iir',
iir_params=iir_params_sos)
assert_allclose(x_sos[100:-100], x_sos_2[100:-100])
x_ba = filter_data(sig, 250, 0.5, None, method='iir',
iir_params=dict(ftype='butter', order=2, output='ba'))
# Note that this will fail for higher orders (e.g., 6) showing the
# hopefully decreased numerical error of SOS
assert_allclose(x_sos[100:-100], x_ba[100:-100])
line_freqs = tuple(range(60, 241, 60))
@pytest.mark.parametrize('method, filter_length, line_freq, tol', [
('spectrum_fit', 'auto', None, 2), # 'auto' same as None on 0.21
('spectrum_fit', None, None, 2),
('spectrum_fit', '10s', None, 2),
('spectrum_fit', 'auto', line_freqs, 1),
('fft', 'auto', line_freqs, 1),
('fft', 8192, line_freqs, 1),
])
def test_notch_filters(method, filter_length, line_freq, tol):
"""Test notch filters."""
# let's use an ugly, prime sfreq for fun
rng = np.random.RandomState(0)
sfreq = 487
sig_len_secs = 21
t = np.arange(0, int(round(sig_len_secs * sfreq))) / sfreq
# make a "signal"
a = rng.randn(int(sig_len_secs * sfreq))
orig_power = np.sqrt(np.mean(a ** 2))
# make line noise
a += np.sum([np.sin(2 * np.pi * f * t) for f in line_freqs], axis=0)
# only allow None line_freqs with 'spectrum_fit' mode
for kind in ('fir', 'iir'):
with pytest.raises(ValueError, match='freqs=None can only be used wi'):
notch_filter(a, sfreq, None, kind)
with catch_logging() as log_file:
b = notch_filter(a, sfreq, line_freq, filter_length,
method=method, verbose=True)
if line_freq is None:
out = [line.strip().split(':')[0]
for line in log_file.getvalue().split('\n')
if line.startswith(' ')]
assert len(out) == 4, 'Detected frequencies not logged properly'
out = np.array(out, float)
assert_array_almost_equal(out, line_freqs)
new_power = np.sqrt(sum_squared(b) / b.size)
assert_almost_equal(new_power, orig_power, tol)
def test_resample():
"""Test resampling."""
rng = np.random.RandomState(0)
x = rng.normal(0, 1, (10, 10, 10))
x_rs = resample(x, 1, 2, 10)
assert x.shape == (10, 10, 10)
assert x_rs.shape == (10, 10, 5)
x_2 = x.swapaxes(0, 1)
x_2_rs = resample(x_2, 1, 2, 10)
assert_array_equal(x_2_rs.swapaxes(0, 1), x_rs)
x_3 = x.swapaxes(0, 2)
x_3_rs = resample(x_3, 1, 2, 10, 0)
assert_array_equal(x_3_rs.swapaxes(0, 2), x_rs)
# make sure we cast to array if necessary
assert_array_equal(resample([0., 0.], 2, 1), [0., 0., 0., 0.])
def test_resample_scipy():
"""Test resampling against SciPy."""
n_jobs_test = (1, 'cuda')
for window in ('boxcar', 'hann'):
for N in (100, 101, 102, 103):
x = np.arange(N).astype(float)
err_msg = '%s: %s' % (N, window)
x_2_sp = sp_resample(x, 2 * N, window=window)
for n_jobs in n_jobs_test:
x_2 = resample(x, 2, 1, 0, window=window, n_jobs=n_jobs)
assert_allclose(x_2, x_2_sp, atol=1e-12, err_msg=err_msg)
new_len = int(round(len(x) * (1. / 2.)))
x_p5_sp = sp_resample(x, new_len, window=window)
for n_jobs in n_jobs_test:
x_p5 = resample(x, 1, 2, 0, window=window, n_jobs=n_jobs)
assert_allclose(x_p5, x_p5_sp, atol=1e-12, err_msg=err_msg)
@pytest.mark.parametrize('n_jobs', (2, 'cuda'))
def test_n_jobs(n_jobs):
"""Test resampling against SciPy."""
x = np.random.RandomState(0).randn(4, 100)
y1 = resample(x, 2, 1, n_jobs=1)
y2 = resample(x, 2, 1, n_jobs=n_jobs)
assert_allclose(y1, y2)
y1 = filter_data(x, 100., 0, 40, n_jobs=1)
y2 = filter_data(x, 100., 0, 40, n_jobs=n_jobs)
assert_allclose(y1, y2)
def test_resamp_stim_channel():
"""Test resampling of stim channels."""
# Downsampling
assert_array_equal(
_resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 2),
[[1, 0, 2, 0]])
assert_array_equal(
_resample_stim_channels([1, 0, 0, 0, 2, 0, 0, 0], 1, 1.5),
[[1, 0, 0, 2, 0]])
assert_array_equal(
_resample_stim_channels([1, 0, 0, 1, 2, 0, 0, 1], 1, 2),
[[1, 1, 2, 1]])
# Upsampling
assert_array_equal(
_resample_stim_channels([1, 2, 3], 2, 1), [[1, 1, 2, 2, 3, 3]])
assert_array_equal(
_resample_stim_channels([1, 2, 3], 2.5, 1), [[1, 1, 1, 2, 2, 3, 3, 3]])
# Proper number of samples in stim channel resampling from io/base.py
data_chunk = np.zeros((1, 315600))
for new_data_len in (52598, 52599, 52600, 52601, 315599, 315600):
new_data = _resample_stim_channels(data_chunk, new_data_len,
data_chunk.shape[1])
assert new_data.shape[1] == new_data_len
def test_resample_raw():
"""Test resampling using RawArray."""
x = np.zeros((1, 1001))
sfreq = 2048.
raw = RawArray(x, create_info(1, sfreq, 'eeg'))
raw.resample(128, npad=10)
data = raw.get_data()
assert data.shape == (1, 63)
def test_resample_below_1_sample():
"""Test resampling doesn't yield datapoints."""
# Raw
x = np.zeros((1, 100))
sfreq = 1000.
raw = RawArray(x, create_info(1, sfreq, 'eeg'))
raw.resample(5)
assert len(raw.times) == 1
assert raw.get_data().shape[1] == 1
# Epochs
x = np.zeros((1, 10000))
sfreq = 1000.
raw = RawArray(x, create_info(1, sfreq, 'eeg'))
events = np.array([[400, 0, 1],
[2000, 0, 1],
[3000, 0, 1]])
epochs = Epochs(raw, events, {'test': 1}, 0, 0.2, proj=False,
picks='eeg', baseline=None, preload=True,
verbose=False)
epochs.resample(1)
assert len(epochs.times) == 1
assert epochs.get_data().shape[2] == 1
@pytest.mark.slowtest
def test_filters():
"""Test low-, band-, high-pass, and band-stop filters plus resampling."""
rng = np.random.RandomState(0)
sfreq = 100
sig_len_secs = 15
a = rng.randn(2, sig_len_secs * sfreq)
# let's test our catchers
for fl in ['blah', [0, 1], 1000.5, '10ss', '10']:
pytest.raises((ValueError, TypeError),
filter_data, a, sfreq, 4, 8, None, fl,
1.0, 1.0, fir_design='firwin')
for nj in ['blah', 0.5]:
pytest.raises(ValueError, filter_data, a, sfreq, 4, 8, None, 1000,
1.0, 1.0, n_jobs=nj, phase='zero', fir_design='firwin')
pytest.raises(ValueError, filter_data, a, sfreq, 4, 8, None, 100,
1., 1., fir_window='foo')
pytest.raises(ValueError, filter_data, a, sfreq, 4, 8, None, 10,
1., 1., fir_design='firwin') # too short
# > Nyq/2
pytest.raises(ValueError, filter_data, a, sfreq, 4, sfreq / 2., None,
100, 1.0, 1.0, fir_design='firwin')
pytest.raises(ValueError, filter_data, a, sfreq, -1, None, None,
100, 1.0, 1.0, fir_design='firwin')
# these should work
create_filter(None, sfreq, None, None)
create_filter(a, sfreq, None, None, fir_design='firwin')
create_filter(a, sfreq, None, None, method='iir')
# check our short-filter warning:
with pytest.warns(RuntimeWarning, match='attenuation'):
# Warning for low attenuation
filter_data(a, sfreq, 1, 8, filter_length=256, fir_design='firwin2')
with pytest.warns(RuntimeWarning, match='Increase filter_length'):
# Warning for too short a filter
filter_data(a, sfreq, 1, 8, filter_length='0.5s', fir_design='firwin2')
# try new default and old default
freqs = fftfreq(a.shape[-1], 1. / sfreq)
A = np.abs(fft(a))
kwargs = dict(fir_design='firwin')
for fl in ['auto', '10s', '5000ms', 1024, 1023]:
bp = filter_data(a, sfreq, 4, 8, None, fl, 1.0, 1.0, **kwargs)
bs = filter_data(a, sfreq, 8 + 1.0, 4 - 1.0, None, fl, 1.0, 1.0,
**kwargs)
lp = filter_data(a, sfreq, None, 8, None, fl, 10, 1.0, n_jobs=2,
**kwargs)
hp = filter_data(lp, sfreq, 4, None, None, fl, 1.0, 10, **kwargs)
assert_allclose(hp, bp, rtol=1e-3, atol=2e-3)
assert_allclose(bp + bs, a, rtol=1e-3, atol=1e-3)
        # Sanity check attenuation
mask = (freqs > 5.5) & (freqs < 6.5)
assert_allclose(np.mean(np.abs(fft(bp)[:, mask]) / A[:, mask]),
1., atol=0.02)
assert_allclose(np.mean(np.abs(fft(bs)[:, mask]) / A[:, mask]),
0., atol=0.2)
# now the minimum-phase versions
bp = filter_data(a, sfreq, 4, 8, None, fl, 1.0, 1.0,
phase='minimum', **kwargs)
bs = filter_data(a, sfreq, 8 + 1.0, 4 - 1.0, None, fl, 1.0, 1.0,
phase='minimum', **kwargs)
assert_allclose(np.mean(np.abs(fft(bp)[:, mask]) / A[:, mask]),
1., atol=0.11)
assert_allclose(np.mean(np.abs(fft(bs)[:, mask]) / A[:, mask]),
0., atol=0.3)
# and since these are low-passed, downsampling/upsampling should be close
n_resamp_ignore = 10
bp_up_dn = resample(resample(bp, 2, 1, n_jobs=2), 1, 2, n_jobs=2)
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
# note that on systems without CUDA, this line serves as a test for a
# graceful fallback to n_jobs=1
bp_up_dn = resample(resample(bp, 2, 1, n_jobs='cuda'), 1, 2, n_jobs='cuda')
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
        # test to make sure our resampling matches scipy's
bp_up_dn = sp_resample(sp_resample(bp, 2 * bp.shape[-1], axis=-1,
window='boxcar'),
bp.shape[-1], window='boxcar', axis=-1)
assert_array_almost_equal(bp[n_resamp_ignore:-n_resamp_ignore],
bp_up_dn[n_resamp_ignore:-n_resamp_ignore], 2)
# make sure we don't alias
t = np.array(list(range(sfreq * sig_len_secs))) / float(sfreq)
# make sinusoid close to the Nyquist frequency
sig = np.sin(2 * np.pi * sfreq / 2.2 * t)
# signal should disappear with 2x downsampling
sig_gone = resample(sig, 1, 2)[n_resamp_ignore:-n_resamp_ignore]
assert_array_almost_equal(np.zeros_like(sig_gone), sig_gone, 2)
# let's construct some filters
iir_params = dict(ftype='cheby1', gpass=1, gstop=20, output='ba')
iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
# this should be a third order filter
assert iir_params['a'].size - 1 == 3
assert iir_params['b'].size - 1 == 3
iir_params = dict(ftype='butter', order=4, output='ba')
iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
assert iir_params['a'].size - 1 == 4
assert iir_params['b'].size - 1 == 4
iir_params = dict(ftype='cheby1', gpass=1, gstop=20)
iir_params = construct_iir_filter(iir_params, 40, 80, 1000, 'low')
# this should be a third order filter, which requires 2 SOS ((2, 6))
assert iir_params['sos'].shape == (2, 6)
iir_params = dict(ftype='butter', order=4, output='sos')
iir_params = construct_iir_filter(iir_params, 40, None, 1000, 'low')
assert iir_params['sos'].shape == (2, 6)
# check that picks work for 3d array with one channel and picks=[0]
a = rng.randn(5 * sfreq, 5 * sfreq)
b = a[:, None, :]
a_filt = filter_data(a, sfreq, 4, 8, None, 400, 2.0, 2.0,
fir_design='firwin')
b_filt = filter_data(b, sfreq, 4, 8, [0], 400, 2.0, 2.0,
fir_design='firwin')
assert_array_equal(a_filt[:, None, :], b_filt)
# check for n-dimensional case
a = rng.randn(2, 2, 2, 2)
with pytest.warns(RuntimeWarning, match='longer'):
pytest.raises(ValueError, filter_data, a, sfreq, 4, 8,
np.array([0, 1]), 100, 1.0, 1.0)
# check corner case (#4693)
want_length = int(round(_length_factors['hamming'] * 1000. / 0.5))
want_length += (want_length % 2 == 0)
assert want_length == 6601
h = create_filter(
np.empty(10000), 1000., l_freq=None, h_freq=55.,
h_trans_bandwidth=0.5, method='fir', phase='zero-double',
fir_design='firwin', verbose=True)
assert len(h) == 6601
h = create_filter(
np.empty(10000), 1000., l_freq=None, h_freq=55.,
h_trans_bandwidth=0.5, method='fir', phase='zero',
fir_design='firwin', filter_length='7s', verbose=True)
assert len(h) == 7001
h = create_filter(
np.empty(10000), 1000., l_freq=None, h_freq=55.,
h_trans_bandwidth=0.5, method='fir', phase='zero-double',
fir_design='firwin', filter_length='7s', verbose=True)
assert len(h) == 8193 # next power of two
def test_filter_auto():
"""Test filter auto parameters."""
# test that our overlap-add filtering doesn't introduce strange
# artifacts (from mne_analyze mailing list 2015/06/25)
N = 300
sfreq = 100.
lp = 10.
sine_freq = 1.
x = np.ones(N)
t = np.arange(N) / sfreq
x += np.sin(2 * np.pi * sine_freq * t)
x_orig = x.copy()
for pad in ('reflect_limited', 'reflect', 'edge'):
for fir_design in ('firwin2', 'firwin'):
kwargs = dict(fir_design=fir_design, pad=pad)
x = x_orig.copy()
x_filt = filter_data(x, sfreq, None, lp, **kwargs)
assert_array_equal(x, x_orig)
n_edge = 10
assert_allclose(x[n_edge:-n_edge], x_filt[n_edge:-n_edge],
atol=1e-2)
assert_array_equal(x_filt, filter_data(x, sfreq, None, lp, None,
**kwargs))
assert_array_equal(x, x_orig)
assert_array_equal(x_filt, filter_data(x, sfreq, None, lp,
**kwargs))
assert_array_equal(x, x_orig)
assert_array_equal(x_filt, filter_data(x, sfreq, None, lp,
copy=False, **kwargs))
assert_array_equal(x, x_filt)
# degenerate conditions
pytest.raises(ValueError, filter_data, x, -sfreq, 1, 10)
pytest.raises(ValueError, filter_data, x, sfreq, 1, sfreq * 0.75)
with pytest.raises(ValueError, match='Data to be filtered must be real'):
filter_data(x.astype(np.float32), sfreq, None, 10)
with pytest.raises(ValueError, match='Data to be filtered must be real'):
filter_data(1j, 1000., None, 40.)
def test_cuda_fir():
"""Test CUDA-based filtering."""
# Using `n_jobs='cuda'` on a non-CUDA system should be fine,
# as it should fall back to using n_jobs=1.
rng = np.random.RandomState(0)
sfreq = 500
sig_len_secs = 20
a = rng.randn(sig_len_secs * sfreq)
kwargs = dict(fir_design='firwin')
with catch_logging() as log_file:
for fl in ['auto', '10s', 2048]:
args = [a, sfreq, 4, 8, None, fl, 1.0, 1.0]
bp = filter_data(*args, **kwargs)
bp_c = filter_data(*args, n_jobs='cuda', verbose='info', **kwargs)
assert_array_almost_equal(bp, bp_c, 12)
args = [a, sfreq, 8 + 1.0, 4 - 1.0, None, fl, 1.0, 1.0]
bs = filter_data(*args, **kwargs)
bs_c = filter_data(*args, n_jobs='cuda', verbose='info', **kwargs)
assert_array_almost_equal(bs, bs_c, 12)
args = [a, sfreq, None, 8, None, fl, 1.0]
lp = filter_data(*args, **kwargs)
lp_c = filter_data(*args, n_jobs='cuda', verbose='info', **kwargs)
assert_array_almost_equal(lp, lp_c, 12)
args = [lp, sfreq, 4, None, None, fl, 1.0]
hp = filter_data(*args, **kwargs)
hp_c = filter_data(*args, n_jobs='cuda', verbose='info', **kwargs)
assert_array_almost_equal(hp, hp_c, 12)
# check to make sure we actually used CUDA
out = log_file.getvalue().split('\n')[:-1]
# triage based on whether or not we actually expected to use CUDA
from mne.cuda import _cuda_capable # allow above funs to set it
tot = 12 if _cuda_capable else 0
assert sum(['Using CUDA for FFT FIR filtering' in o for o in out]) == tot
if not _cuda_capable:
pytest.skip('CUDA not enabled')
def test_cuda_resampling():
"""Test CUDA resampling."""
rng = np.random.RandomState(0)
for window in ('boxcar', 'triang'):
for N in (997, 1000): # one prime, one even
a = rng.randn(2, N)
for fro, to in ((1, 2), (2, 1), (1, 3), (3, 1)):
a1 = resample(a, fro, to, n_jobs=1, npad='auto',
window=window)
a2 = resample(a, fro, to, n_jobs='cuda', npad='auto',
window=window)
                assert_allclose(a1, a2, rtol=1e-7, atol=1e-14)
# -*- coding: utf-8 -*-
#
# Authors: Swolf <<EMAIL>>
# Date: 2021/1/23
# License: MIT License
"""
Riemannian Procrustes Analysis.
Modified from https://github.com/plcrodrigues/RPA
"""
from typing import Union, List, Tuple, Dict, Optional, Callable
from functools import partial
import numpy as np
from numpy import ndarray
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
from sklearn.utils.extmath import softmax
from joblib import Parallel, delayed
from scipy.linalg import eigvalsh, inv, eigh
import autograd.numpy as anp
try:
from pymanopt.manifolds import Rotations
except ImportError:
from pymanopt.manifolds import SpecialOrthogonalGroup as Rotations
from pymanopt import Problem
try:
from pymanopt.solvers import SteepestDescent
except ImportError:
from pymanopt.optimizers import SteepestDescent
from ..utils.covariance import (nearestPD, covariances, sqrtm, invsqrtm, logm, expm, powm)
from .riemann import mean_riemann, distance_riemann
def get_recenter(X: ndarray,
cov_method: str = 'cov',
mean_method: str = 'riemann',
n_jobs: Optional[int] = None):
X = np.reshape(X, (-1, *X.shape[-2:]))
X = X - np.mean(X, axis=-1, keepdims=True)
C = covariances(X, estimator=cov_method, n_jobs=n_jobs)
if mean_method == 'riemann':
M = mean_riemann(C, n_jobs=n_jobs)
elif mean_method == 'euclid':
M = np.mean(C, axis=0)
iM12 = invsqrtm(M)
return iM12
def recenter(X: ndarray, iM12: ndarray):
X = np.reshape(X, (-1, *X.shape[-2:]))
X = X - np.mean(X, axis=-1, keepdims=True)
return iM12@X
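# Illustrative sketch (not part of the original module): recentering moves the
# mean covariance of a batch of epochs toward the identity matrix.
def _demo_recenter():
    rng = np.random.RandomState(0)
    X = rng.randn(10, 4, 128)  # (n_trials, n_channels, n_samples), synthetic
    iM12 = get_recenter(X, cov_method='cov', mean_method='euclid')
    return recenter(X, iM12)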
def get_rescale(X: ndarray,
cov_method: str = 'cov',
n_jobs: Optional[int] = None):
X = np.reshape(X, (-1, *X.shape[-2:]))
    X = X - np.mean(X, axis=-1, keepdims=True)
'''
<NAME>
2021
'''
import numpy as np
import cv2
from numpy.fft import fftn, ifftn, fft2, ifft2, fftshift
from numpy import conj, real
from utils import gaussian2d_rolled_labels, cos_window
from hog_cpp.fhog.get_hog import get_hog
vgg_path = 'model/imagenet-vgg-verydeep-19.mat'
def create_model():
from scipy import io
from keras.applications.vgg19 import VGG19
from keras.models import Model
mat = io.loadmat(vgg_path)
model = VGG19(mat)
ixs = [2, 5, 10, 15, 20]
outputs = [model.layers[i].output for i in ixs]
model = Model(inputs=model.inputs, outputs=outputs)
# model.summary()
return model
vgg_model = create_model()
class KernelizedCorrelationFilter:
def __init__(self, correlation_type='gaussian', feature='hog'):
        self.padding = 1.5  # extra area surrounding the target
self.lambda_ = 1e-4 # regularization
self.output_sigma_factor = 0.1 # spatial bandwidth (proportional to target)
self.correlation_type = correlation_type
self.feature = feature
self.resize = False
# GRAY
if feature == 'gray':
self.interp_factor = 0.075 # linear interpolation factor for adaptation
self.sigma = 0.2 # gaussian kernel bandwidth
self.poly_a = 1 # polynomial kernel additive term
self.poly_b = 7 # polynomial kernel exponent
self.gray = True
self.cell_size = 1
# HOG
elif feature == 'hog':
self.interp_factor = 0.02 # linear interpolation factor for adaptation
self.sigma = 0.5 # gaussian kernel bandwidth
self.poly_a = 1 # polynomial kernel additive term
self.poly_b = 9 # polynomial kernel exponent
self.hog = True
self.hog_orientations = 9
self.cell_size = 4
# DEEP
elif feature == 'deep':
self.interp_factor = 0.02 # linear interpolation factor for adaptation
self.sigma = 0.5 # gaussian kernel bandwidth
self.poly_a = 1 # polynomial kernel additive term
self.poly_b = 9 # polynomial kernel exponent
self.deep = True
self.cell_size = 4 # 8
def start(self, init_gt, show, frame_list):
poses = []
poses.append(init_gt)
init_frame = cv2.imread(frame_list[0])
x1, y1, w, h = init_gt
init_gt = tuple(init_gt)
self.init(init_frame, init_gt)
for idx in range(len(frame_list)):
if idx != 0:
current_frame = cv2.imread(frame_list[idx])
bbox = self.update(current_frame)
if bbox is not None:
x1, y1, w, h = bbox
if show is True:
if len(current_frame.shape) == 2:
current_frame = cv2.cvtColor(current_frame, cv2.COLOR_GRAY2BGR)
show_frame = cv2.rectangle(current_frame, (int(x1), int(y1)), (int(x1 + w), int(y1 + h)),
(255, 0, 0), 1)
cv2.imshow('demo', show_frame)
cv2.waitKey(1)
else:
print('bbox is None')
poses.append(np.array([int(x1), int(y1), int(w), int(h)]))
return np.array(poses)
def init(self, image, roi):
# Get image size and search window size
x, y, w, h = roi
self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
self.target_sz = np.array([h, w])
self.target_sz_real = np.array([h, w])
self.pos = np.array([y + np.floor(h/2), x + np.floor(w/2)])
        if np.sqrt(h * w) >= 100:  # geometric-mean target size >= threshold
self.resize = True
self.pos = np.floor(self.pos / 2)
self.target_sz = np.floor(self.target_sz / 2)
if self.resize:
self.image = cv2.resize(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))
# window size, taking padding into account
self.window_sz = np.floor(np.multiply(self.target_sz, (1 + self.padding)))
self.output_sigma = round(round(np.sqrt(self.target_sz[0]*self.target_sz[1]), 4) * self.output_sigma_factor / self.cell_size, 4)
        # (width, height) ordering expected by gaussian2d_rolled_labels
        yf_sz = np.floor(self.window_sz / self.cell_size)[::-1]
gauss = gaussian2d_rolled_labels(yf_sz, self.output_sigma)
self.yf = fft2(gauss)
#store pre-computed cosine window
self.cos_window = cos_window([self.yf.shape[1], self.yf.shape[0]])
# obtain a subwindow for training at newly estimated target position
patch = self.get_subwindow(self.image, self.pos, self.window_sz)
feat = self.get_features(patch)
xf = fftn(feat, axes=(0, 1))
kf = []
if self.correlation_type == 'gaussian':
kf = self.gaussian_correlation(xf, xf)
alphaf = np.divide(self.yf, (kf + self.lambda_))
self.model_alphaf = alphaf
self.model_xf = xf
def update(self, image):
self.image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
if self.resize:
self.image = cv2.resize(self.image, (self.image.shape[1] // 2, self.image.shape[0] // 2))
patch = self.get_subwindow(self.image, self.pos, self.window_sz)
zf = fftn(self.get_features(patch), axes=(0, 1))
if self.correlation_type == 'gaussian':
kzf = self.gaussian_correlation(zf, self.model_xf)
response = real(ifftn(self.model_alphaf * kzf, axes=(0, 1))) # equation for fast detection
        # Find the location of the maximum response
delta = np.unravel_index(np.argmax(response, axis=None), response.shape)
vert_delta, horiz_delta = delta[0], delta[1]
if vert_delta > np.size(zf, 0) / 2: # wrap around to negative half-space of vertical axis
vert_delta = vert_delta - np.size(zf, 0)
if horiz_delta > np.size(zf, 1) / 2: # same for horizontal axis
horiz_delta = horiz_delta - np.size(zf, 1)
self.pos = self.pos + self.cell_size * np.array([vert_delta, horiz_delta])
# obtain a subwindow for training at newly estimated target position
patch = self.get_subwindow(self.image, self.pos, self.window_sz)
feat = self.get_features(patch)
xf = fftn(feat, axes=(0, 1))
# Kernel Ridge Regression, calculate alphas (in Fourier domain)
if self.correlation_type == 'gaussian':
kf = self.gaussian_correlation(xf, xf)
alphaf = np.divide(self.yf, (kf + self.lambda_))
# subsequent frames, interpolate model
self.model_alphaf = (1 - self.interp_factor) * self.model_alphaf + self.interp_factor * alphaf
self.model_xf = (1 - self.interp_factor) * self.model_xf + self.interp_factor * xf
if self.resize:
pos_real = np.multiply(self.pos, 2)
else:
pos_real = self.pos
box = [pos_real[1] - self.target_sz_real[1] / 2,
pos_real[0] - self.target_sz_real[0] / 2,
self.target_sz_real[1],
self.target_sz_real[0]]
return box[0], box[1], box[2], box[3]
def get_subwindow(self, im, pos, sz):
_p1 = np.array(range(0, int(sz[0]))).reshape([1, int(sz[0])])
_p2 = np.array(range(0, int(sz[1]))).reshape([1, int(sz[1])])
ys = np.floor(pos[0]) + _p1 - np.floor(sz[0]/2)
xs = np.floor(pos[1]) + _p2 - np.floor(sz[1]/2)
# Check for out-of-bounds coordinates, and set them to the values at the borders
xs[xs < 0] = 0
ys[ys < 0] = 0
xs[xs > np.size(im, 1) - 1] = np.size(im, 1) - 1
ys[ys > np.size(im, 0) - 1] = np.size(im, 0) - 1
xs = xs.astype(int)
ys = ys.astype(int)
# extract image
out1 = im[list(ys[0, :]), :, :]
out = out1[:, list(xs[0, :]), :]
return out
def get_features(self, im):
if self.feature == 'hog':
# HOG features, from Piotr's Toolbox
x = np.double(self.get_fhog(im))
return x * self.cos_window[:, :, None]
if self.feature == 'gray':
x = np.double(im) / 255
x = x - np.mean(x)
return x * self.cos_window[:, :, None]
if self.feature == 'deep':
x = self.get_deep_feature(im)
x = x / np.max(x)
return x * self.cos_window[:, :, None]
def get_fhog(self, im_patch):
H = get_hog(im_patch/255)
return H
def gaussian_correlation(self, xf, yf):
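        # Gaussian kernel correlation evaluated in the Fourier domain
        # (Henriques et al., "High-Speed Tracking with Kernelized Correlation
        # Filters", 2015): by Parseval's theorem ||x||^2 and ||y||^2 come from
        # the spectra, and the cross term from one inverse FFT of
        # sum_c xf_c * conj(yf_c).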
N = xf.shape[0] * xf.shape[1]
xff = xf.reshape([xf.shape[0] * xf.shape[1] * xf.shape[2], 1], order='F')
xff_T = xff.conj().T
yff = yf.reshape([yf.shape[0] * yf.shape[1] * yf.shape[2], 1], order='F')
yff_T = yff.conj().T
        xx = np.dot(xff_T, xff)
import itertools
import random
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
class FitClass:
"""
This class is used for sigma optimization via curve_fit.
"""
def __init__(self):
self.mu = []
def multi_modal(self, *args):
"""
This is a multi-modal Gaussian with fixed mean values mu.
:param args: contains the x value and the parameters
:return:
"""
idx_offset = 2
n = int((len(args) - 1) / idx_offset)
x = args[0]
gauss_sum = 0
for i in range(n):
mu = self.mu[i]
sigma = args[1 + (idx_offset * i)]
a = args[2 + (idx_offset * i)]
gauss_sum += a * np.exp(-((x - mu) ** 2.0) / 2.0 / sigma ** 2.0)
return gauss_sum
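# Illustrative sketch (not part of the original module): fit per-mode
# (sigma, amplitude) pairs of a bimodal Gaussian with fixed means; curve_fit
# unpacks the flat parameter vector as sigma_1, a_1, sigma_2, a_2.
def _demo_fitclass():
    x = np.linspace(0., 50., 500)
    y = 1.0*np.exp(-((x - 10.)**2)/2./2.**2) + 0.5*np.exp(-((x - 30.)**2)/2./3.**2)
    fc = FitClass()
    fc.mu = [10., 30.]
    popt, _ = curve_fit(fc.multi_modal, x, y, p0=[1., 1., 1., 1.])
    return popt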
class GMPDA:
"""
GMPDA (Gaussian Mixture Periodicity Detection Algorithm) class for periodicity detection.
"""
def __init__(
self,
ts,
max_depth=2,
max_periods=2,
max_candidates=None,
mu_range_min=1,
mu_range_max=None,
noise_range=None,
sigma=None,
sigma_curvefit=False,
sigma_log_init=False,
random_walk=1,
loss_length=None,
loss_change_tol=0.001,
ref_loss_n=0,
gmpda_seed=10,
report_short=True,
):
"""
:param ts: (np 1-d array)
:param max_depth: (int)
:param max_periods:(int)
:param max_candidates:(int)
:param mu_range_min:(int)
:param mu_range_max:(int)
        :param noise_range: None or (int)
        :param sigma: None or (int)
:param sigma_curvefit:(boolean)
:param sigma_log_init:(boolean)
:param random_walk:{0,1}
:param loss_length:(int)
:param loss_change_tol:(float) in [0, 1)
:param ref_loss_n:(int)
:param gmpda_seed:(int)
"""
np.random.seed(gmpda_seed)
random.seed(gmpda_seed)
self.max_depth = max_depth
self.max_periods = max_periods
self.max_candidates = max_candidates
        if self.max_candidates is not None and self.max_candidates >= mu_range_max - mu_range_min:
print(
"Warning: The set of candidate periodicities is [mu_range_min, mu_range_max], "
"as max_candidates >= mu_range_max - mu_range_min."
)
self.mu_range_max = mu_range_max
self.mu_range_min = mu_range_min
if mu_range_min < 5:
self.mu_range_min = 5
print("Require mu_range_min >= 5. Set mu_range_min = 5.")
if not sigma:
self.sigma = int(np.ceil(np.log(mu_range_min)))
else:
self.sigma = sigma
if not noise_range or noise_range > mu_range_min:
self.noise_range = mu_range_min
else:
self.noise_range = noise_range
self.random_walk = random_walk
self.loss_change_tol = loss_change_tol
self.sigma_curvefit = sigma_curvefit
self.sigma_log_init = sigma_log_init
# init ts
if np.shape(ts.shape)[0] < 2:
self.ts = ts.reshape((1, ts.shape[0]))
else:
n, m = ts.shape
if n > m:
self.ts = ts.reshape((m, n))
elif n < m:
self.ts = ts
self.len_ts = self.ts.shape[1]
self.event_set = np.where((self.ts == 1))[1]
"""Account for time segment of length > mu_range_max, i.e no events"""
self.len_es_ob = sum(np.diff(self.event_set)[np.diff(self.event_set) <= self.mu_range_max])
assert self.len_es_ob > 0, Warning("There are no intervals <= mu_range_max in the data.")
"""The loss_length is extended, in order to slice wrt loss_length inclusively."""
if loss_length <= self.mu_range_max + 3 * self.sigma:
loss_length = self.mu_range_max + 3 * self.sigma
print("Require loss_length > mu_range_max. Set loss_length = mu_range_max + 3*max(self.sigma)")
if loss_length + 1 > self.len_ts:
self.loss_length = self.len_ts
print(
f"Waring: loss_length={loss_length}, len_ts={self.len_ts}"
f"\nRequire loss_length + 1 < len(ts). Set loss_length = len(ts)."
)
else:
self.loss_length = loss_length + 1
self.ref_loss_n = ref_loss_n
self.report_short = report_short
@staticmethod
def round_down(n, decimals=6):
"""
Deterministic rounding by truncation.
:param n: float or array, numbers to be rounded
:param decimals:
:return: rounded float up to decimals
"""
multiplier = 10 ** decimals
if isinstance(n, np.ndarray):
return (n * multiplier).astype("int") / multiplier
elif isinstance(n, float):
return int(n * multiplier) / multiplier
elif isinstance(n, int):
return n
else:
print(f"Error: round_down() for type{type(n)} is not implemented.")
raise NotImplementedError
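    # Example: round_down(1.2345678, 4) -> 1.2345 (truncation, not rounding to
    # nearest); round_down(np.array([1.9999999]), 6) -> array([1.999999]).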
def reinit_ts(self, ts_):
"""
Reinit self configuration if a new time series is considered, this is the case for the local loss.
:param ts_:(np 1-d array)
:return: None
"""
if np.shape(ts_.shape)[0] < 2:
self.ts = ts_.reshape((1, ts_.shape[0]))
else:
n, m = ts_.shape
if n > m:
self.ts = ts_.reshape((m, n))
elif n < m:
self.ts = ts_
self.len_ts = self.ts.shape[1]
self.event_set = np.where((self.ts == 1))[1]
# Account for time segment of length > mu_range_max, i.e no events
self.len_es_ob = sum(np.diff(self.event_set)[np.diff(self.event_set) <= self.mu_range_max])
def get_ref_loss(self, n=100):
"""
Estimates the reference loss for the initialized event series.
:param n: number of samples
:return: array of losses
"""
self.ref_loss_n = 0
ts_origin = self.ts
sc_orig = self.sigma_curvefit
sigma_orig = self.sigma
self.sigma_curvefit = False
ref_loss = []
for i in range(0, n):
# Create a noise only time series
ts_noise = np.zeros_like(self.ts).astype("int")
idx_events = np.random.choice(range(max(ts_noise.shape)), int(self.ts.sum()), replace=False)
ts_noise[:, idx_events] = 1
self.sigma = sigma_orig
_, _, _, loss_, _ = self.extract_periods(ts=ts_noise, verbose=False)
ref_loss.append(loss_[0])
# Set self to origin ts
self.sigma_curvefit = sc_orig
self.sigma = sigma_orig
self.reinit_ts(ts_=ts_origin)
return ref_loss
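    # Usage note (illustrative): comparing the loss from extract_periods()
    # against the null distribution returned here gives a permutation-style
    # significance check for detected periodicities.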
def get_intervals(self):
"""
This function estimates the 1st, 2nd, 3rd, ..., Kth order intervals between positive observations.
:return: numpy ndarray (1,loss_length), frequencies
"""
dmu = np.zeros((1, self.len_ts))
for i in range(len(self.event_set) - 1):
pos_loc = self.event_set[i + 1 : :] - self.event_set[i]
dmu[:, pos_loc] += 1
# Restrict Dmu to a predefined length
dmu = dmu[:, : self.loss_length]
# Estimate noise contribution
z = np.median(dmu[:, 1 : self.noise_range])
idx = np.arange(0, self.loss_length)
zeta_mu = 0
if self.len_es_ob != 0 and z != 0 and ~np.isnan(z):
zeta_mu = z * (1 - (idx / self.len_es_ob))
idx_ = np.where(zeta_mu < 0)[0]
zeta_mu[idx_] = 0
# Remove noise
dmu_init = dmu.copy()
dmu[0, :] -= zeta_mu
# Contributions from the noise range should be neglected
dmu[0, 0 : self.noise_range] = 0
# With no noise in data, there are cases of dmu[:, idx] -= zeta_mu with negative results, set to 0
idx = np.where(dmu[0, :] < 0)[0]
dmu[0, idx] = 0
return dmu, dmu_init
def integral_convolution(self, dmu):
"""
Smooth Dmu to account for randomness in the process.
:param dmu: numpy ndarray, frequency table
:return: tau: numpy ndarray, smoothed/rolled frequency table
"""
len_dmu = dmu.shape[1]
tau_mu = np.zeros((1, len_dmu))
if self.sigma >= 1:
sigma = self.sigma # keep just one sigma, in order not to over-smooth
weight_ = 1 / np.arange(2, sigma + 2)
for k in np.arange(1, len_dmu, 1):
total_weight = np.concatenate([weight_[::-1], np.array([1]), weight_])
a = int(k - sigma)
if a < 1:
a = 1
if a == 1:
total_weight = total_weight[sigma - k + 1 :]
b = int(k + sigma + 1)
if b > len_dmu:
total_weight = total_weight[: len_dmu - b]
b = len_dmu
r = np.zeros((len_dmu, 1))
r[np.arange(a, b, 1)] = total_weight.reshape(-1, 1)
tau_mu[:, int(k)] = np.dot(dmu, r)
else:
tau_mu = dmu
return tau_mu
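# Kernel sketch: for sigma = 2, weight_ = [1/2, 1/3], so the smoothing kernel
# centered on each lag k is [1/3, 1/2, 1, 1/2, 1/3] (truncated at the array
# boundaries), and tau_mu[:, k] is the dot product of dmu with that kernel.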
def explain_data(self, tau_mu, mu):
"""
Count the number of events, defined in tau_mu, that are covered by the period and sigma.
:param tau_mu: numpy ndarray (1,loss_length), smoothed frequencies of intervals
:param mu: int, current periodicity
:return: float, score of how much of the process is explained by the periodicity
"""
len_tau = tau_mu.shape[1]
number_events = int(len_tau / mu)
# This is the integral-sum over all possible events associated with periodicity mu and sigma
conf_int = np.arange(-self.sigma, self.sigma + 1, 1)
mu_multp = (np.arange(1, number_events + 1) * mu).reshape(-1, 1) # +1 to have number of events, inclusive
mu_ci = (mu_multp - conf_int).reshape(-1, 1)
idx = np.where((mu_ci > 0) & (mu_ci < len_tau))[0]
domain = np.unique(mu_ci[idx].astype("int"))  # Overlapping regions are counted once.
return np.sum(tau_mu[:, domain])
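# Example of the covered domain (hypothetical numbers): for mu = 10, sigma = 1
# and len_tau = 35, the multiples are 10, 20, 30 and the +-sigma windows give
# domain = {9, 10, 11, 19, 20, 21, 29, 30, 31}; overlaps are counted once
# thanks to np.unique.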
def remove_explained(self, dmu, mu):
"""
Set dmu at mu +- sigma to zero.
:param dmu: np ndarray (1,loss_length), frequencies of intervals
:param mu: int, periodicity
:return: dmu: np ndarray (1,loss_length), frequencies of intervals remaining after dropping frequency mu
"""
len_dmu = dmu.shape[1]
number_events = int(len_dmu / mu)
conf_int = np.arange(-self.sigma, self.sigma + 1, 1)
mu_multp = (np.arange(1, number_events + 1) * mu).reshape(-1, 1)
mu_ci = (mu_multp - conf_int).reshape(-1, 1)
idx = np.where((mu_ci > 0) & (mu_ci < len_dmu))[0]
domain = np.unique(mu_ci[idx].astype("int"))
dmu[:, domain] = 0
return dmu
def gauss_pdf_explain(self, mu, sigma):
"""
Evaluate the generative function.
:param mu: int
:param sigma: int
:return: gmu: np ndarray (1, loss_length) - implied Gaussians wrt mu/sigma
"""
gmu = np.zeros((1, self.loss_length))
# The number of events during the observed period, needs to be adjusted with respect to long intervals with no
# events (longer than mu + 1.96 * sigma). These intervals are disregarded when computing the maximum possible
# number of intervals.
len_es_mu = sum(np.diff(self.event_set)[np.diff(self.event_set) <= mu + 1.96 * sigma]) # here 1.96* correct
number_events_sn = int(len_es_mu / mu)
if number_events_sn < 2:
return np.nan * gmu
x_range = np.arange(1, number_events_sn, 1)
mu_x = mu * x_range
b_x = number_events_sn - (x_range - 1)
# Estimate gmu
sigma_x = np.sqrt(self.random_walk * (x_range - 1) + 1) * sigma
sigma_x2 = sigma_x ** 2
a_x = 1 / np.sqrt(2 * np.pi * sigma_x2)
for i in range(len(mu_x)):
conf_ = np.ceil(3 * sigma_x[i])  # 3 sigma covers ~99.7% of the gaussian pdf
x = np.arange(int(max(1, mu_x[i] - conf_)), int(mu_x[i] + conf_ + 1), 1, dtype=int)
x = x[np.where(x < self.loss_length)]
gmu[:, x] += (a_x[i] * np.exp((-0.5 * (x - mu_x[i]) ** 2) / sigma_x2[i])) * b_x[i]
return gmu
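# Width model note: sigma_x = sqrt(random_walk * (k - 1) + 1) * sigma for the
# k-th multiple of mu, so random_walk = 1 lets harmonic widths grow as
# sqrt(k) * sigma (random-walk timing), while random_walk = 0 keeps them fixed.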
def get_loss(self, dmu, gmu=[], mu_list=[], sigma_list=[]):
"""
Estimate the loss for a list of mu/sigma.
:param dmu: np array of dim=(1, loss_length), frequencies of intervals
:param gmu: np array of dim=(len(mu_list), dmu.shape[1]); computed from mu_list/sigma_list when empty
:param mu_list: list of int
:param sigma_list: list of int
:return: normalized L1 loss (np array of shape (1,))
"""
if len(gmu) == 0:
gmu = self.get_gmu(mu_list, sigma_list)
diff_dmu_pmu = dmu - gmu.sum(axis=0)
loss = np.sum(np.abs(diff_dmu_pmu), axis=1) / np.sum(np.abs(dmu), axis=1)
return loss
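# The loss is a normalized L1 distance, loss = sum(|dmu - sum_i gmu_i|) / sum(|dmu|):
# 0 means the Gaussian mixture reproduces dmu exactly, while values near 1
# mean it explains almost none of the observed intervals.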
def get_gmu(self, mu_list, sigma_list):
"""
Estimate gmu for a list of mu/sigma.
:param mu_list: list of int
:param sigma_list: list of int
:return: np array of dim=(len(mu_list), loss_length)
"""
gmu = np.zeros((len(mu_list), self.loss_length))
for i in range(len(mu_list)):
gmu[i, :] = self.gauss_pdf_explain(mu_list[i], sigma_list[i])
return gmu
def get_sigma_init(self, dmu, mu_list, sigma_list):
"""
Optimize sigma for every single mu.
:param dmu: np array of dim (1, loss_length), frequencies of intervals
:param mu_list: list of int
:param sigma_list: list of int
:return: list of optimized sigmas, one per mu
"""
sigma_new = []
for mu, sigma in zip(mu_list, sigma_list):
sigma = self.get_sigma(dmu, [mu], [sigma])
sigma_new.extend(sigma)
return sigma_new
def get_sigma(self, dmu, mu_list, sigma_list):
"""
Use curvefit to improve the guess of sigma.
:param dmu: np array of dim (1, self.loss_length)
:param mu_list: sorted list
:param sigma_list: list
:return: list
"""
mu_max = mu_list[-1]
mu_min = mu_list[0]
# 3 * sigma to cover ~99.7% of the pdf, +1 to make loss_length_ inclusive
loss_length_ = int(min(mu_max + np.ceil(3 * sigma_list[-1] + 1), dmu.shape[1]))
st = int(max((mu_min - 0.75 * mu_min), 0))
end = int(min((mu_max + 0.75 * mu_max + 1), dmu.shape[1]))
y = dmu[:, st:end][0]
x = np.arange(st, end)
init_params = []
mu_fc = []
b_up = []
for mu, sigma in zip(mu_list, sigma_list):
off_set = np.floor(mu_max / mu).astype("int")
for i in range(off_set):
mu_fc.append(float(mu * (i + 1)))
init_params.append(max(min(float(sigma), np.ceil(np.log(mu_max)) - 1), 1))
init_params.append(1.0)
b_up.append(0.25 * mu * (i + 1))
b_up.append(np.inf)
# Lower bounds
b_low = np.ones((1, len(init_params)))
b_low[0, 1::2] = 0.0 # for a, set lower bound to zero, no upper bound as dmu is not normalized.
# Fit dmu curve
fc = FitClass()
fc.mu = mu_fc
params, _ = curve_fit(fc.multi_modal, x, y, init_params, bounds=([b_low.tolist()[0], b_up]))
results = np.zeros((int(len(params) / 2), 2))
for i in range(int(len(params) / 2)):
row = i * 2
results[i, :] = [params[row], params[row + 1]]
off_set = 0
sigma_new = []
for mu in mu_list:
sigma_new.append(np.round(results[off_set, 0]))
"""
This example demonstrates how to use the active learning interface with Keras.
The example uses the scikit-learn wrappers of Keras. For more info, see https://keras.io/scikit-learn-api/
"""
import keras
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.wrappers.scikit_learn import KerasClassifier
from modAL.models import ActiveLearner
# build function for Keras' scikit-learn API
def create_keras_model():
"""
This function compiles and returns a Keras model.
Should be passed to KerasClassifier in the Keras scikit-learn API.
"""
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
return model
# create the classifier
classifier = KerasClassifier(create_keras_model)
"""
Data wrangling
1. Reading data from Keras
2. Assembling initial training data for ActiveLearner
3. Generating the pool
"""
# read training data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(10000, 28, 28, 1).astype('float32') / 255
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
# assemble initial data
n_initial = 1000
initial_idx = np.random.choice(range(len(X_train)), size=n_initial, replace=False)
X_initial = X_train[initial_idx]
y_initial = y_train[initial_idx]
# generate the pool
# remove the initial data from the training dataset
X_pool = np.delete(X_train, initial_idx, axis=0)
y_pool = np.delete(y_train, initial_idx, axis=0)
"""
Training the ActiveLearner
"""
# initialize ActiveLearner
learner = ActiveLearner(
estimator=classifier,
X_training=X_initial, y_training=y_initial,
verbose=1
)
# the active learning loop
n_queries = 10
for idx in range(n_queries):
query_idx, query_instance = learner.query(X_pool, n_instances=100, verbose=0)
print(query_idx)
learner.teach(
X=X_pool[query_idx], y=y_pool[query_idx], only_new=True,
verbose=1
)
# remove queried instance from pool
X_pool = np.delete(X_pool, query_idx, axis=0)
y_pool = np.delete(y_pool, query_idx, axis=0)
# -*- coding = utf-8 -*-
# @Author:何欣泽
# @Time:2020/11/4 17:31
# @File:RNN.py
# @Software:PyCharm
import os
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import *
import numpy as np
import librosa
def generateDataset(woman_path, mixed_path):
samples_woman, _ = librosa.load(woman_path, sr=8000)
# samples_man, _ = librosa.load(man_file, sr=8000)
mixed_series, _ = librosa.load(mixed_path, sr=8000)
win_length = 256
hop_length = 64
nfft = 512
mix_spectrum = librosa.stft(mixed_series, win_length=win_length, hop_length=hop_length, n_fft=nfft)
woman_spectrum = librosa.stft(samples_woman, win_length=win_length, hop_length=hop_length, n_fft=nfft)
# man_spectrum = librosa.stft(samples_man, win_length=win_length, hop_length=hop_length, n_fft=nfft)
woman_mag = np.abs(woman_spectrum.T)
mix_mag = np.abs(mix_spectrum.T)
mask = IRM(woman_mag, mix_mag)
return mix_mag, mask
def IRM(clean_spectrum, mix_spectrum):
snr = np.divide(np.abs(clean_spectrum), np.abs(mix_spectrum))
# IRM
mask = snr / (snr + 1)
mask[np.isnan(mask)] = 0.5
mask = np.power(mask, 0.5)
return mask
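# Quick numeric check: where the clean magnitude equals the mixture magnitude,
# snr = 1, the raw mask is 1 / (1 + 1) = 0.5, and after the 0.5 power the IRM
# value is sqrt(0.5) ~= 0.707.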
def get_model():
model = keras.models.Sequential()
model.add(keras.layers.LSTM(512, return_sequences=True))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.1))
model.add(keras.layers.LSTM(1024, return_sequences=True))
model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.1))
model.add(keras.layers.Dense(257))
model.add(BatchNormalization())
model.add(Activation('sigmoid'))
return model
def train(model, train_x, train_y, text_x, text_y):
model.compile(loss='mse', optimizer='adam', metrics=['mse'], )
checkpoint_save_path = './cheakpoint/LSTMfunction23(2).ckpt'
if os.path.exists(checkpoint_save_path + '.index'):
print('-------------load the model-----------')
model.load_weights(checkpoint_save_path)
RNN_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
save_weights_only=True,
save_best_only=True,
monitor='val_loss')
history = model.fit(train_x, train_y,
batch_size=1, epochs=100, validation_split=0.,
validation_data=(text_x, text_y),
validation_freq=5,
callbacks=[RNN_callback])
model.save("./model/LSTMfunction23_model(2).h5")
print(model.summary())
loss = history.history['loss']
val_loss = history.history['val_loss']
return loss, val_loss
def main():
global train_x, train_y, text_x, text_y
num = 1
cout = 1
for i in range(1, 30):
clean_path = r'C:\Users\MACHENIKE\Desktop\数字信号处理B\项目\woman_series\woman_speech{}.wav'.format(i)
mix_path = r'C:\Users\MACHENIKE\Desktop\数字信号处理B\项目\mixed_series\mixed_series{}.wav'.format(i)
feature, label = generateDataset(clean_path, mix_path)
if np.shape(feature[:, 0]) == (720,):
print(i)
if cout == 2:
train_x = [feature, train_x]
elif cout == 1:
train_x = feature
else:
train_x = np.insert(train_x, 0, feature, axis=0)
if np.shape(label[:, 0]) == (720,):
if cout == 2:
train_y = [label, train_y]
elif cout == 1:
train_y = label
else:
train_y = np.insert(train_y, 0, label, axis=0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys, yaml, time
import copy
import torch
import pickle, matplotlib
import numpy as np
import matplotlib.pyplot as plt
from models.RITnet_v2 import DenseNet2D
from args import parse_args
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from helperfunctions import mypause, linVal
from pytorchtools import EarlyStopping, load_from_file
from utils import get_nparams, Logger, get_predictions, lossandaccuracy
from utils import getSeg_metrics, getPoint_metric, generateImageGrid, unnormPts
from utils import getAng_metric, calc_edge
from test import calc_acc
from bdcn_new import BDCN
from torchvision.utils import make_grid
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), os.pardir)))
#%%
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE" # Deactive file locking
embed_log = 5
EPS=1e-7
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def get_config(config):
with open(config, 'r') as stream:
return yaml.safe_load(stream)
def mypause(interval):
backend = plt.rcParams['backend']
if backend in matplotlib.rcsetup.interactive_bk:
figManager = matplotlib._pylab_helpers.Gcf.get_active()
if figManager is not None:
canvas = figManager.canvas
if canvas.figure.stale:
canvas.draw()
canvas.start_event_loop(interval)
return
if __name__ == '__main__':
args = parse_args()
setting = get_config(args.setting)
print('Setting : ')
print(setting)
device=torch.device("cuda")
if torch.cuda.device_count() > 1:
print('Moving to a multiGPU setup.')
args.useMultiGPU = True
else:
print('Single GPU setup')
args.useMultiGPU = False
LOGDIR = os.path.join(os.getcwd(), 'logs', args.model, args.expname)
path2model = os.path.join(LOGDIR, 'weights')
path2writer = os.path.join(LOGDIR, 'TB.lock')
path2checkpoint = os.path.join(LOGDIR, 'checkpoints')
path2pretrained = os.path.join(os.getcwd(),
'logs',
args.model,
'pretrained',
'weights',
'pretrained.git_ok')
# Generate directories if they don't exist
os.makedirs(LOGDIR, exist_ok=True)
os.makedirs(path2model, exist_ok=True)
os.makedirs(path2checkpoint, exist_ok=True)
os.makedirs(path2writer, exist_ok=True)
# Open relevant train/test object
f = open(os.path.join(args.test_mode,'cond_'+str(args.curObj)+'.pkl'), 'rb')
trainObj, validObj, _ = pickle.load(f)
trainObj.augFlag = True
validObj.augFlag = False
ff = open(os.path.join('baseline', 'cond_LPW.pkl'), 'rb')
lpw_validObj, _, lpw_testObj = pickle.load(ff)
lpw_validObj.augFlag = False
lpw_testObj.augFlag = False
if(args.id == 0):
trainObj.path2data = os.path.join(args.path2data, 'Datasets', 'All/New')
validObj.path2data = os.path.join(args.path2data, 'Datasets', 'All/New')
lpw_validObj.path2data = os.path.join(args.path2data, 'Datasets', 'TEyeD-h5-Edges')
lpw_testObj.path2data = os.path.join(args.path2data, 'Datasets', 'TEyeD-h5-Edges')
elif(args.id == 1):
trainObj.path2data = os.path.join(args.path2data, 'userdata/riteye.zip/All/New')
validObj.path2data = os.path.join(args.path2data, 'userdata/riteye.zip/All/New')
lpw_validObj.path2data = os.path.join(args.path2data, 'userdata/lpw.zip/TEyeD-h5-Edges')
lpw_testObj.path2data = os.path.join(args.path2data, 'userdata/lpw.zip/TEyeD-h5-Edges')
else:
assert False, 'illegal id'
print(' train, valid, lpw_valid, lpw_testObj: ', len(trainObj), len(validObj), len(lpw_validObj), len(lpw_testObj))
lpw_validloader = DataLoader(lpw_validObj,
batch_size=args.batchsize,
shuffle=False,
drop_last=True,
num_workers=args.workers)
lpw_testloader = DataLoader(lpw_testObj,
batch_size=args.batchsize,
shuffle=False,
drop_last=True,
num_workers=args.workers)
# load model
BDCN_network = BDCN()
state_dict = torch.load('gen_00000016.pt')
BDCN_network.load_state_dict(state_dict['a'])
BDCN_network = BDCN_network.cuda()
BDCN_network.eval()
writer = SummaryWriter(path2writer)
logger = Logger(os.path.join(LOGDIR,'logs.log'))
# Ensure model has all necessary weights initialized
model = DenseNet2D(setting)
model.selfCorr = args.selfCorr
model.disentangle = args.disentangle
param_list = [param for name, param in model.named_parameters() if 'dsIdentify' not in name]
optimizer = torch.optim.Adam([{'params':param_list,
'lr':args.lr}]) # Set optimizer
# If loading pretrained weights, ensure you don't load confusion branch
if args.resume:
print ("NOTE resuming training. Priority: 1) Checkpoint 2) Epoch #")
checkpointfile = os.path.join(path2checkpoint, 'checkpoint.pt')
model = model.to(device)
netDict = load_from_file([checkpointfile, args.loadfile])
# Load previous checkpoint and resume from that epoch
model.load_state_dict(netDict['state_dict'])
startEp = netDict['epoch'] + 1 if 'epoch' in netDict.keys() else 0
elif 'pretrained' not in args.expname:
# If the very first epoch, then save out an _init pickle
# This is particularly useful for lottery tickets
print('Searching for pretrained weights ...')
if os.path.exists(path2pretrained):
netDict = torch.load(path2pretrained)
model.load_state_dict(netDict['state_dict'])
print('Pretrained weights loaded! Enjoy the ride ...')
else:
print('No pretrained weights found. Warning: training on only pupil centers leads to instability.')
startEp = 0
torch.save(model.state_dict(), os.path.join(path2model, args.model+'{}.pkl'.format('_init')))
else:
startEp = 0
print('Pretraining mode detected ...')
torch.save(model.state_dict(), os.path.join(path2model, args.model+'{}.pkl'.format('_init')))
# Let the network know you need a disentanglement module.
# Please refer to args.py for more information on disentanglement strategy
if args.disentangle:
# Let the model know how many datasets it must expect
print('Total # of datasets found: {}'.format(np.unique(trainObj.imList[:, 2]).size))
model.setDatasetInfo(np.unique(trainObj.imList[:, 2]).size)
opt_disent = torch.optim.Adam(model.dsIdentify_lin.parameters(), lr=1*args.lr)
nparams = get_nparams(model)
print('Total number of trainable parameters: {}\n'.format(nparams))
patience = 10
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
'max',
patience=patience-5,
verbose=True,
factor=0.1) # Default factor = 0.1
early_stopping = EarlyStopping(mode='max',
delta=0.001,
verbose=True,
patience=patience,
fName='checkpoint.pt',
path2save=path2checkpoint)
model = model if not args.useMultiGPU else torch.nn.DataParallel(model)
model = model.to(device).to(args.prec) # NOTE: good habit to do this before optimizer
if args.overfit > 0:
# This is a flag to check if attempting to overfit on a small subset
# This is used as a quick check to verify training process
trainObj.imList = trainObj.imList[:args.overfit*args.batchsize,:]
validObj.imList = validObj.imList[:args.overfit*args.batchsize,:]
trainloader = DataLoader(trainObj,
batch_size=args.batchsize,
shuffle=True,
num_workers=args.workers,
drop_last=True)
validloader = DataLoader(validObj,
batch_size=args.batchsize,
shuffle=False,
num_workers=args.workers,
drop_last=True)
if args.disp:
fig, axs = plt.subplots(nrows=1, ncols=1)
#%%
print('!!!!!-----MODEL STRUCTURE------------')
tot_parameters = 0
for k, v in model.named_parameters():
print(k, v.shape)
tot_parameters += v.numel()
print('tot_para: ', tot_parameters)
print('------------------------------------\n\n\n')
print('trainObj, validObj, lpw_trainObj, lpw_testObj', len(trainObj), len(validObj), len(lpw_validObj), len(lpw_testObj))
print(args.workers)
max_miou = 0.0
min_pup_c = 30.0
#miou_test, pct_test, ict_test = calc_acc(args, lpw_testloader, model, BDCN_network, device)
# calc time for analyse
start_time = time.time()
calc_edge_time = 0
calc_network_time = 0
# print edge
# start_edge = time.time()
# print('!!!Test ', args.batchsize)
# print('args.prec: ', args.prec)
# ans = []
# for bt, batchdata in enumerate(trainloader):
# if(bt > 62):break
# img, labels, spatialWeights, distMap, pupil_center, iris_center, elNorm, cond, imInfo = batchdata
# with torch.no_grad():
# a = torch.cat((img, img, img), dim=1).to(device).to(args.prec)
# ans.append(a.shape)
# img_edge = BDCN_network(torch.cat((img, img, img), dim=1).to(device).to(args.prec))[-1]
# #calc_edge_time += time.time() - start_edge
# print('test edge time : {:.3f}'.format(time.time() - start_edge))
# print('num:', len(ans))
# miou_valid2, pct_valid2, tg = calc_acc(args, lpw_validloader, model, BDCN_network, device)
for epoch in range(startEp, args.epochs):
accLoss = 0.0
ious = []
scoreType = {'c_dist':[], 'ang_dist': [], 'sc_rat': []}
scoreTrack = {'pupil': copy.deepcopy(scoreType),
'iris': copy.deepcopy(scoreType)}
model.train()
alpha = linVal(epoch, (0, args.epochs), (0, 1), 0)
for bt, batchdata in enumerate(trainloader):
if(args.test_normal and bt > 10):break
img, labels, spatialWeights, distMap, pupil_center, iris_center, elNorm, cond, imInfo = batchdata
start_edge = time.time()
img_edge = calc_edge(args, img, BDCN_network, device)
calc_edge_time += time.time() - start_edge
# # Show edge imgs
# pred = 255 - img_edge.cpu().data.numpy()[0, 0, :, :] * 255
# I_o = []
# image_ori = torch.cat((img, img, img), dim=1)
# image_ori = image_ori[0, :, ].cpu().numpy()
# image_ori = image_ori / image_ori.max()
# image_ori = np.moveaxis(image_ori, 0, 2)
#
# pred2 = np.stack([pred for i in range(0, 3)], axis=2)
# pred2 = pred2 / pred2.max()
# print('image_ori', image_ori.shape)
# print('pred2', pred2.shape)
# I_o.append(image_ori)
# I_o.append(pred2)
# I_o = np.stack(I_o, axis=0)
#
# I_o = np.moveaxis(I_o, 3, 1)
# I_o = make_grid(torch.from_numpy(I_o).to(torch.float), nrow=1)
# if bt == 0:
# h_im = plt.imshow(I_o.permute(1, 2, 0))
# plt.pause(1)
# else:
# h_im.set_data(I_o.permute(1, 2, 0))
# mypause(1)
optimizer.zero_grad()
start_network = time.time()
out_tup = model(img.to(device).to(args.prec),
img_edge,
labels.to(device).long(),
pupil_center.to(device).to(args.prec),
elNorm.to(device).to(args.prec),
spatialWeights.to(device).to(args.prec),
distMap.to(device).to(args.prec),
cond.to(device).to(args.prec),
imInfo[:, 2].to(device).to(torch.long), # Send DS #
alpha)
calc_network_time += time.time() - start_network
output, elOut, _, loss, _ = out_tup
loss = loss.mean() if args.useMultiGPU else loss
loss.backward()
optimizer.step()
# Predicted centers
pred_c_iri = elOut[:, 0:2].detach().cpu().numpy()
pred_c_pup = elOut[:, 5:7].detach().cpu().numpy()
accLoss += loss.detach().cpu().item()
predict = get_predictions(output)
# IOU metric
iou = getSeg_metrics(labels.numpy(),
predict.numpy(),
cond[:, 1].numpy())[1]
ious.append(iou)
# Center distance
ptDist_iri = getPoint_metric(iris_center.numpy(),
pred_c_iri,
cond[:,1].numpy(),
img.shape[2:],
True)[0] # Unnormalizes the points
ptDist_pup = getPoint_metric(pupil_center.numpy(),
pred_c_pup,
cond[:,0].numpy(),
img.shape[2:],
True)[0] # Unnormalizes the points
# Angular distance
angDist_iri = getAng_metric(elNorm[:, 0, 4].numpy(),
elOut[:, 4].detach().cpu().numpy(),
cond[:, 1].numpy())[0]
angDist_pup = getAng_metric(elNorm[:, 1, 4].numpy(),
elOut[:, 9].detach().cpu().numpy(),
cond[:, 1].numpy())[0]
# Scale metric
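# The ratio below is sqrt(sum(gt_ab**2) / sum(pred_ab**2)), i.e. the norm of
# the ground-truth ellipse semi-axes over the norm of the predicted ones;
# a value of 1 means the predicted ellipse has the correct overall size.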
gt_ab = elNorm[:, 0, 2:4]
pred_ab = elOut[:, 2:4].cpu().detach()
scale_iri = torch.sqrt(torch.sum(gt_ab**2, dim=1)/torch.sum(pred_ab**2, dim=1))
scale_iri = torch.sum(scale_iri*(~cond[:,1]).to(torch.float32)).item()
gt_ab = elNorm[:, 1, 2:4]
pred_ab = elOut[:, 7:9].cpu().detach()
scale_pup = torch.sqrt(torch.sum(gt_ab**2, dim=1)/torch.sum(pred_ab**2, dim=1))
scale_pup = torch.sum(scale_pup*(~cond[:,1]).to(torch.float32)).item()
# Append to score dictionary
scoreTrack['iris']['c_dist'].append(ptDist_iri)
scoreTrack['iris']['ang_dist'].append(angDist_iri)
scoreTrack['iris']['sc_rat'].append(scale_iri)
scoreTrack['pupil']['c_dist'].append(ptDist_pup)
scoreTrack['pupil']['ang_dist'].append(angDist_pup)
scoreTrack['pupil']['sc_rat'].append(scale_pup)
iri_c = unnormPts(pred_c_iri,
img.shape[2:])
pup_c = unnormPts(pred_c_pup,
img.shape[2:])
if args.disp:
# Generate image grid with overlayed predicted data
dispI = generateImageGrid(img.squeeze().numpy(),
predict.numpy(),
elOut.detach().cpu().numpy().reshape(-1, 2, 5),
pup_c,
cond.numpy(),
override=True,
heatmaps=False)
if (epoch == startEp) and (bt == 0):
h_im = plt.imshow(dispI.permute(1, 2, 0))
plt.pause(0.01)
else:
h_im.set_data(dispI.permute(1, 2, 0))
mypause(0.01)
if bt%30 == 0:
now_time = time.time()
logger.write('Epoch:{} [{}/{}], Loss: {:.3f} time:{:.3f}'.format(epoch,
bt,
len(trainloader),
loss.item(),
now_time - start_time))
print('calc_edge time : {:.3f}'.format(calc_edge_time))
print('calc_network_time : {:.3f}'.format(calc_network_time))
calc_edge_time = 0
calc_network_time = 0
start_time = now_time
valid_st = time.time()
# Sketch the very last batch. Training drops uneven batches.
dispI = generateImageGrid(img.squeeze().numpy(),
predict.numpy(),
elOut.detach().cpu().numpy().reshape(-1, 2, 5),
pup_c,
cond.numpy(),
override=True,
heatmaps=False)
ious = np.stack(ious, axis=0)
ious = np.nanmean(ious, axis=0)
logger.write('Epoch:{}, Train IoU: {}'.format(epoch, ious))
out_tup = lossandaccuracy(args, # Training arguments
validloader, # Validation loader
model, # Model
BDCN_network,
alpha, # Alpha value to measure loss
device)
lossvalid, ious_valid, scoreTrack_v, latent_codes= out_tup
print('valid_loader time : {:.3f}'.format(time.time() - valid_st))
valid_st = time.time()
# Add iris info to tensorboard
writer.add_scalars('iri_c/mu', {'train':np.nanmean(scoreTrack['iris']['c_dist']),
'valid':np.nanmean(scoreTrack_v['iris']['c_dist'])}, epoch)
writer.add_scalars('iri_c/std', {'train':np.nanstd(scoreTrack['iris']['c_dist']),
'valid':np.nanstd(scoreTrack_v['iris']['c_dist'])}, epoch)
writer.add_scalars('iri_ang/mu', {'train':np.nanmean(scoreTrack['iris']['ang_dist']),
'valid':np.nanmean(scoreTrack_v['iris']['ang_dist'])}, epoch)
writer.add_scalars('iri_ang/std', {'train':np.nanstd(scoreTrack['iris']['ang_dist']),
'valid':np.nanstd(scoreTrack_v['iris']['ang_dist'])}, epoch)
writer.add_scalars('iri_sc/mu', {'train':np.nanmean(scoreTrack['iris']['sc_rat']),
'valid':np.nanmean(scoreTrack_v['iris']['sc_rat'])}, epoch)
writer.add_scalars('iri_sc/std', {'train':np.nanstd(scoreTrack['iris']['sc_rat']),
'valid':np.nanstd(scoreTrack_v['iris']['sc_rat'])}, epoch)
# Add pupil info to tensorboard
writer.add_scalars('pup_c/mu', {'train':np.nanmean(scoreTrack['pupil']['c_dist']),
'valid':np.nanmean(scoreTrack_v['pupil']['c_dist'])}, epoch)
writer.add_scalars('pup_c/std', {'train':np.nanstd(scoreTrack['pupil']['c_dist']),
'valid':np.nanstd(scoreTrack_v['pupil']['c_dist'])}, epoch)
writer.add_scalars('pup_ang/mu', {'train':np.nanmean(scoreTrack['pupil']['ang_dist']),
'valid':np.nanmean(scoreTrack_v['pupil']['ang_dist'])}, epoch)
writer.add_scalars('pup_ang/std', {'train':np.nanstd(scoreTrack['pupil']['ang_dist']),
'valid':np.nanstd(scoreTrack_v['pupil']['ang_dist'])}, epoch)
writer.add_scalars('pup_sc/mu', {'train':np.nanmean(scoreTrack['pupil']['sc_rat']),
'valid':np.nanmean(scoreTrack_v['pupil']['sc_rat'])}, epoch)
writer.add_scalars('pup_sc/std', {'train':np.nanstd(scoreTrack['pupil']['sc_rat']),
'valid':np.nanstd(scoreTrack_v['pupil']['sc_rat'])}, epoch)
writer.add_scalar('loss/train', accLoss/bt, epoch)
writer.add_scalar('loss/valid', lossvalid, epoch)
# Write image to tensorboardX
writer.add_image('train/op', dispI, epoch)
if epoch%embed_log == 0:
print('Saving validation embeddings ...')
writer.add_embedding(torch.cat(latent_codes, 0),
metadata=validObj.imList[:len(latent_codes)*args.batchsize, 2],
global_step=epoch)
f = 'Epoch:{}, Valid Loss: {:.3f}, mIoU: {}'
logger.write(f.format(epoch, lossvalid, np.mean(ious)))
# Generate a model dictionary which stores epochs and current state
netDict = {'state_dict':[], 'epoch': epoch}
stateDict = model.state_dict() if not args.useMultiGPU else model.module.state_dict()
netDict['state_dict'] = {k: v for k, v in stateDict.items() if 'dsIdentify_lin' not in k}
pup_c_dist = np.nanmean(scoreTrack_v['pupil']['c_dist'])
pup_ang_dist = np.nanmean(scoreTrack_v['pupil']['ang_dist'])
"""
principal set of transformations defining RA-Dec to Galactic
coordinates, as per Gaia instructions.
# see https://gea.esac.esa.int/archive/documentation/GDR2/Data_processing/chap_cu3ast/sec_cu3ast_intro/ssec_cu3ast_intro_tansforms.html
# TODO: find where I did the Cartesian velocities.
# TODO: add the translation to the solar location (rather, away from)
# add EXAMPLES
04-Mar-2021: add arbitrary rotations
"""
legacy = False
import numpy as np
if legacy:
# this is all legacy. compare and then delete if not needed
def return_gaia_Agprime():
"""return the matrix in eq 3.61, key to transform from ICRS to galactic coordinates"""
return np.array([[-0.0548755604162154,-0.8734370902348850,-0.4838350155487132],
[+0.4941094278755837,-0.4448296299600112,+0.7469822444972189],
[-0.8676661490190047,-0.1980763734312015,+0.4559837761750669]])
def return_ricrs(a,d):
""" eq."""
return np.array([np.cos(a)*np.cos(d),np.sin(a)*np.cos(d),np.sin(d)]).T
def return_picrs(a,d):
""" eq. 3.64, unit vector of increasing alpha"""
return np.array([-np.sin(a),np.cos(a),0.]).T
def return_qicrs(a,d):
""" eq. 3.64, unit vector of increasing delta"""
return np.array([-np.cos(a)*np.sin(d),-np.sin(a)*np.sin(d),np.cos(d)]).T
def return_muicrs(a,d,mua,mud):
""" eq. 3.66, the proper motion vector"""
p = return_picrs(a,d)
q = return_qicrs(a,d)
return np.dot(p,mua) + np.dot(q,mud)
def return_rgal(l,b):
""" eq."""
return np.array([np.cos(l)*np.cos(b),np.sin(l)*np.cos(b),np.sin(b)]).T
def return_pgal(l,b):
""" eq. 3.66, unit vector of increasing alpha"""
return np.array([-np.sin(l),np.cos(l),0.]).T
def return_qgal(l,b):
""" eq. 3.66, unit vector of increasing delta"""
return np.array([-np.cos(l)*np.sin(b),-np.sin(l)*np.sin(b),np.cos(b)]).T
def return_mugal(l,b,mul,mub):
""" eq. 3.66, the proper motion vector"""
p = return_pgal(l,b)
q = return_qgal(l,b)
return np.dot(p,mul) + np.dot(q,mub)
def rotate_velocities(a,d,mua,mud):
"""eq 3.68, """
mu = return_muicrs(a,d,mua,mud)
mugal = np.dot(return_gaia_Agprime(),mu) # eq. 3.68
# solve for positions
ricrs = return_ricrs(a,d)
rgal = np.dot(return_gaia_Agprime(),ricrs)
# implement eq 3.63
ell,b = np.arctan2(rgal[1],rgal[0]),np.arctan2(rgal[2],np.sqrt(rgal[0]*rgal[0]+rgal[1]*rgal[1]))
p = return_pgal(ell,b)
q = return_qgal(ell,b)
mul = np.dot(p.T,mugal)
mub = np.dot(q.T,mugal)
#print(mul,mub)
return mul,mub
def rotate_errors(a,d,pmra_e,pmdec_e,pmcorr):
"""rotate covariance error from ra/dec to l/b."""
ricrs = return_ricrs(a,d)
picrs = return_picrs(a,d)
qicrs = return_qicrs(a,d)
rgal = np.dot(return_gaia_Agprime(),ricrs)
# implement eq 3.63
ell = np.arctan2(rgal[1],rgal[0])
b = np.arctan2(rgal[2],np.sqrt(rgal[0]*rgal[0]+rgal[1]*rgal[1]))
pgal = return_pgal(ell,b)
qgal = return_qgal(ell,b)
pqgal = np.stack((pgal, qgal), axis=-1)
pqicrs = np.stack((picrs, qicrs), axis=-1)
cov = np.array([[pmra_e*pmra_e,pmra_e*pmdec_e*pmcorr],[pmra_e*pmdec_e*pmcorr,pmdec_e*pmdec_e]])
#print(cov)
G = np.einsum('ab,ac->bc', pqgal,
np.einsum('ji,ik->jk', return_gaia_Agprime(), pqicrs))
cov_to = np.einsum('ba,ac->bc', G,
np.einsum('ij,ki->jk', cov, G))
return cov_to
def rotate_positions(a,d):
"""helper transformation from ra/dec to l/b"""
ricrs = return_ricrs(a,d)
rgal = np.dot(return_gaia_Agprime(),ricrs)
ell = np.arctan2(rgal[1],rgal[0])
b = np.arctan2(rgal[2],np.sqrt(rgal[0]*rgal[0]+rgal[1]*rgal[1]))
return ell,b
def rotate_positions_cartesian(a,d,r):
"""following galactic conventions"""
ricrs = return_ricrs(a,d)
rgal = np.dot(return_gaia_Agprime(),ricrs)
x = rgal[0]*r
y = rgal[1]*r
z = rgal[2]*r
return x,y,z
def rotate_velocities_cartesian(a,d,r,mua,mud,vlos):
"""following galactic conventions. This is a right-handed system, I think."""
ell,b = rotate_positions(a,d)
mul,mub = rotate_velocities(a,d,mua,mud)
k = 4.74057
vl,vb = k*mul*r,k*mub*r # transform to km/s
cost,sint = np.cos(b),np.sin(b)
cosp,sinp = np.cos(ell),np.sin(ell)
xdot = cost*cosp*vlos - sint*cosp*vb - sinp*vl
ydot = cost*sinp*vlos - sint*sinp*vb + cosp*vl
zdot = sint *vlos + cost *vb
return xdot,ydot,zdot
def return_gaia_Agprime():
"""return the matrix in eq 3.61, key to transform from ICRS to galactic coordinates"""
return np.array([[-0.0548755604162154,-0.8734370902348850,-0.4838350155487132],
[+0.4941094278755837,-0.4448296299600112,+0.7469822444972189],
[-0.8676661490190047,-0.1980763734312015,+0.4559837761750669]])
def return_gaia_Ag():
"""set the Hipparcos computation
if truly obsessed see https://www.cosmos.esa.int/documents/532822/552851/vol1_all.pdf
though this has higher precision!!
"""
return np.array([[-0.0548755604162154,+0.4941094278755837,-0.8676661490190047],
[-0.8734370902348850,-0.4448296299600112,-0.1980763734312015],
[-0.4838350155487132,+0.7469822444972189,+0.4559837761750669]])
def return_ricrs(a,d):
""" eq. 3.57"""
return np.array([np.cos(a)*np.cos(d),np.sin(a)*np.cos(d),np.sin(d)])
def return_picrs(a,d):
""" eq. 3.64, unit vector of increasing alpha"""
if hasattr(a,'size'):
return np.array([-np.sin(a),np.cos(a),np.zeros(a.size)])
else:
return np.array([-np.sin(a),np.cos(a),0.])
def return_qicrs(a,d):
""" eq. 3.64, unit vector of increasing delta"""
return np.array([-np.cos(a)*np.sin(d),-np.sin(a)*np.sin(d),np.cos(d)])
def return_muicrs(a,d,mua,mud):
""" eq. 3.66, the proper motion vector"""
p = return_picrs(a,d)
q = return_qicrs(a,d)
return p*mua + q*mud
def return_rgal(l,b):
""" eq. 3.58"""
return np.array([np.cos(l)*np.cos(b),np.sin(l)*np.cos(b),np.sin(b)])
def return_pgal(l,b):
""" eq. 3.66, unit vector of increasing alpha"""
if hasattr(l,'size'):
return np.array([-np.sin(l),np.cos(l),0.*np.cos(l)])
else:
return np.array([-np.sin(l),np.cos(l),0.*np.cos(l)])
def return_qgal(l,b):
""" eq. 3.66, unit vector of increasing delta"""
return np.array([-np.cos(l)*np.sin(b),-np.sin(l)*np.sin(b),np.cos(b)])
def return_mugal(l,b,mul,mub):
""" eq. 3.66, the proper motion vector"""
p = return_pgal(l,b)
q = return_qgal(l,b)
return p*mul + q*mub
def rotate_velocities(a,d,mua,mud):
"""eq 3.68, """
mu = return_muicrs(a,d,mua,mud)
mugal = np.dot(return_gaia_Agprime(),mu) # eq. 3.68
# solve for positions
ricrs = return_ricrs(a,d)
rgal = np.dot(return_gaia_Agprime(),ricrs)
# implement eq 3.63
ell,b = np.arctan2(rgal[1],rgal[0]),np.arctan2(rgal[2],np.sqrt(rgal[0]*rgal[0]+rgal[1]*rgal[1]))
p = return_pgal(ell,b)
q = return_qgal(ell,b)
mul = np.sum(p*mugal,axis=0)
mub = np.sum(q*mugal,axis=0)
#print(mul,mub)
return mul,mub
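# Rough usage sketch (hypothetical values; angles in radians, proper motions
# in the same units in and out, e.g. mas/yr). Array inputs also work since
# the p/q helpers broadcast:
#   a, d = np.deg2rad(266.405), np.deg2rad(-28.936)  # roughly Sgr A*
#   mul, mub = rotate_velocities(a, d, 2.0, -1.0)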
def rotate_errors(a,d,pmra_e,pmdec_e,pmcorr):
ricrs = return_ricrs(a,d)
picrs = return_picrs(a,d)
qicrs = return_qicrs(a,d)
rgal = np.dot(return_gaia_Agprime(),ricrs)
# implement eq 3.63
ell = np.arctan2(rgal[1],rgal[0])
b = np.arctan2(rgal[2],np.sqrt(rgal[0]*rgal[0]+rgal[1]*rgal[1]))
pgal = return_pgal(ell,b)
qgal = return_qgal(ell,b)
pqgal = np.stack((pgal, qgal), axis=-1)
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for point clouds."""
import gin
import gin.tf
import numpy as np
from scipy import spatial
import tensorflow as tf
def flip_normals_towards_viewpoint(points, normals, viewpoint):
"""Flips the normals to face towards the view point.
Args:
points: A tf.float32 tensor of size [N, 3].
normals: A tf.float32 tensor of size [N, 3].
viewpoint: A tf.float32 tensor of size [3].
Returns:
flipped_normals: A tf.float32 tensor of size [N, 3].
"""
# (viewpoint - point)
view_vector = tf.expand_dims(viewpoint, axis=0) - points
# Dot product between the (viewpoint - point) and the plane normal
cos_theta = tf.expand_dims(
tf.reduce_sum(view_vector * normals, axis=1), axis=1)
# Revert normals where cos is negative.
normals *= tf.sign(tf.tile(cos_theta, [1, 3]))
return normals
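# Small worked example: a point at the origin with normal (0, 0, -1) and
# viewpoint (0, 0, 1) gives view_vector = (0, 0, 1) and cos_theta = -1 < 0,
# so the normal is flipped to (0, 0, 1), facing the viewpoint.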
def points_to_normals_unbatched(points,
k,
distance_upper_bound,
viewpoint=None,
noise_magnitude=1e-4,
method='pca'):
"""Computes normals for the points in a point cloud.
Args:
points: A tf.float32 tensor of size [N, 3].
k: An integer determining the size of the neighborhood.
distance_upper_bound: Maximum distance of the neighbor points. If None, it
will not add a cap on the distance.
viewpoint: A tf.float32 tensor of size [3]. Normals will be flipped to point
towards view point. If None, it won't be used.
noise_magnitude: Noise magnitude to be added to the input of svd. If None,
it won't add noise.
method: The normal prediction method, options are `pca` and `cross` (cross
product).
Returns:
normals: A tf.float32 tensor of size [N, 3].
"""
if method == 'pca':
if k <= 3:
raise ValueError('At least 3 neighbors are required for computing PCA.')
elif method == 'cross':
if k <= 2:
raise ValueError('At least 2 neighbors are required for computing cross.')
else:
raise ValueError(('Unknown method of normal prediction %s' % method))
n = tf.shape(points)[0]
d = points.get_shape().as_list()[1]
if d != 3:
raise ValueError('Points dimension is not 3.')
_, knn_adjacencies = knn_graph_from_points_unbatched(
points=points, k=k, distance_upper_bound=distance_upper_bound)
knn_adjacencies = knn_adjacencies[:, 1:]
knn_adjacencies = tf.reshape(knn_adjacencies, [n * (k - 1)])
adjacency_points = tf.gather(points, indices=knn_adjacencies)
adjacency_points = tf.reshape(adjacency_points, [n, (k - 1), d])
if method == 'pca':
adjacency_relative_points = adjacency_points - tf.expand_dims(
points, axis=1)
if noise_magnitude is not None:
adjacency_relative_points += tf.random.uniform(
tf.shape(adjacency_relative_points),
minval=-noise_magnitude,
maxval=noise_magnitude,
dtype=tf.float32)
_, _, v = tf.linalg.svd(adjacency_relative_points)
normals = v[:, 2, :]
elif method == 'cross':
v1 = adjacency_points[:, 0, :] - points
v2 = adjacency_points[:, 1, :] - points
normals = tf.linalg.cross(v1, v2)
normals_length = tf.expand_dims(tf.norm(normals, axis=1), axis=1)
if noise_magnitude is not None:
normals_length += noise_magnitude
normals /= normals_length
else:
raise ValueError(('Unknown method of normal prediction %s' % method))
if viewpoint is not None:
normals = flip_normals_towards_viewpoint(
points=points, normals=normals, viewpoint=viewpoint)
return normals
@gin.configurable
def points_to_normals(points,
num_valid_points,
k=10,
distance_upper_bound=0.5,
viewpoints=None,
noise_magnitude=1e-4,
method='pca'):
"""Computes normals for the points in a point cloud.
Args:
points: A tf.float32 tensor of size [batch_size, N, 3].
num_valid_points: A tf.int32 tensor of size [batch_size] representing the
number of valid points in each example.
k: An integer determining the size of the neighborhood.
distance_upper_bound: Maximum distance of the neighbor points. If None, it
will not add a cap on the distance.
viewpoints: A tf.float32 tensor of size [batch_size, 3]. Normals will be
flipped to point towards view point. If None, it won't be used.
noise_magnitude: Noise magnitude to be added to the input of svd. If None,
it won't add noise.
method: The normal prediction method, options are `pca` and `cross` (cross
product).
Returns:
normals: A tf.float32 tensor of size [batch_size, N, 3].
"""
batch_size = points.get_shape().as_list()[0]
if batch_size is None:
raise ValueError('batch_size is unknown at graph construction time.')
num_points = tf.shape(points)[1]
def fn_normals_single_batch(i):
"""Function for computing normals for a single batch."""
num_valid_points_i = num_valid_points[i]
points_i = points[i, 0:num_valid_points_i, :]
if viewpoints is None:
viewpoint_i = None
else:
viewpoint_i = viewpoints[i, :]
normals_i = points_to_normals_unbatched(
points=points_i,
k=k,
distance_upper_bound=distance_upper_bound,
viewpoint=viewpoint_i,
noise_magnitude=noise_magnitude,
method=method)
return tf.pad(
normals_i, paddings=[[0, num_points - num_valid_points_i], [0, 0]])
normals = []
for i in range(batch_size):
normals.append(fn_normals_single_batch(i))
return tf.stack(normals, axis=0)
def np_knn_graph_from_points_unbatched(points,
k,
distance_upper_bound,
mask=None):
"""Returns the distances and indices of the neighbors of each point.
Args:
points: A np.float32 numpy array of [N, D] where D is the point dimensions.
k: Number of neighbors for each point.
distance_upper_bound: Only build the graph using points that are closer than
this distance.
mask: If None, will be ignored. If not None, A np.bool numpy array of
size [N]. knn will be applied to only points where the mask is True. The
points where the mask is False will have themselves as their neighbors.
Returns:
distances: A np.float32 numpy array of [N, k].
indices: A np.int32 numpy array of [N, k].
"""
num_points = points.shape[0]
if mask is None:
mask = np.ones([num_points], dtype=bool)
num_masked_points = np.sum(mask.astype(np.int32))
indices = np.expand_dims(np.arange(num_points), axis=1)
indices = np.tile(indices, [1, k])
distances = np.zeros([num_points, k], dtype=np.float32)
if num_masked_points >= k:
masked_points = points[mask, :]
tree = spatial.cKDTree(masked_points)
masked_distances, masked_indices = tree.query(
masked_points, k=k, distance_upper_bound=distance_upper_bound)
placeholder = np.tile(
np.expand_dims(np.arange(num_masked_points), axis=1), [1, k])
valid_mask = np.greater_equal(masked_indices,
num_masked_points).astype(np.int32)
masked_indices = masked_indices * (1 -
valid_mask) + placeholder * valid_mask
masked_distances = np.nan_to_num(masked_distances)
masked_distances *= (1.0 - valid_mask)
masked_indices_shape = masked_indices.shape
masked_indices = np.arange(num_points)[mask][np.reshape(
masked_indices, [-1])]
masked_indices = np.reshape(masked_indices, masked_indices_shape)
import numpy as np
import pytest
from mock import patch
from numpy.testing import assert_almost_equal
from robogym.envs.dactyl.full_perpendicular import make_simple_env
from robogym.envs.dactyl.locked import make_env as make_env_locked
from robogym.envs.dactyl.reach import make_simple_env as make_reach_env
from robogym.mujoco.helpers import joint_qpos_ids_from_prefix
from robogym.utils import rotation
from robogym.utils.dactyl_utils import actuated_joint_range
from robogym.wrappers.dactyl import (
FingersFreezingPhasespaceMarkers,
FingersOccludedPhasespaceMarkers,
RandomizedPhasespaceFingersWrapper,
RandomizedRobotDampingWrapper,
RandomizedRobotKpWrapper,
)
from robogym.wrappers.randomizations import QUAT_NOISE_CORRECTION # noqa
from robogym.wrappers.randomizations import (
ActionDelayWrapper,
ActionNoiseWrapper,
BacklashWrapper,
ObservationDelayWrapper,
RandomizedActionLatency,
RandomizedBrokenActuatorWrapper,
RandomizedCubeFrictionWrapper,
RandomizedGravityWrapper,
RandomizedJointLimitWrapper,
RandomizedRobotFrictionWrapper,
RandomizedTendonRangeWrapper,
RandomizedTimestepWrapper,
RandomizeObservationWrapper,
)
VISUALIZE = False
def test_wrapper_divergence():
"""
This test runs the same action in the vanilla dactyl_locked env and in one that is wrapped by
a given wrapper. After some steps, the wrapped env should diverge from the vanilla version.
"""
env_kwargs = {
"n_random_initial_steps": 0,
}
simple_env = make_simple_env(parameters=env_kwargs, starting_seed=0)
dummy_env = make_simple_env(
parameters=env_kwargs, starting_seed=0
) # should be exact same as `simple_env`
# Add you wrappers here!
wrappers_to_test = [
(ActionNoiseWrapper, {}),
(BacklashWrapper, {}),
(FingersOccludedPhasespaceMarkers, {}), # Need 'noisy_fingertip_pos'
(FingersFreezingPhasespaceMarkers, {}), # Need 'noisy_fingertip_pos'
(
RandomizedBrokenActuatorWrapper,
{
"proba_broken": 1.0, # force one broken actuators
"max_broken_actuators": 1,
},
),
(RandomizedRobotFrictionWrapper, {}),
(RandomizedCubeFrictionWrapper, {}),
(RandomizedGravityWrapper, {}),
(RandomizedJointLimitWrapper, {}),
(RandomizedTendonRangeWrapper, {}),
(RandomizedPhasespaceFingersWrapper, {}),
(RandomizedRobotDampingWrapper, {}),
(RandomizedRobotKpWrapper, {}),
(RandomizedTimestepWrapper, {}),
(ActionDelayWrapper, {}),
# With default args, the maximum qpos difference is too small.
(RandomizedActionLatency, {"max_delay": 2}), # default 1
# (RandomizedBodyInertiaWrapper, {}), # default mass_range=[0.5, 1.5]
]
wrapped_envs = []
for wrapper_class, kwargs in wrappers_to_test:
env = make_simple_env(parameters=env_kwargs, starting_seed=0)
if wrapper_class in (
FingersOccludedPhasespaceMarkers,
FingersFreezingPhasespaceMarkers,
):
env = RandomizeObservationWrapper(
env=env,
levels={"fingertip_pos": {"uncorrelated": 0.002, "additive": 0.001}},
)
env = wrapper_class(env=env, **kwargs)
env.reset()
wrapped_envs.append(env)
for i in range(200):
action = np.ones(env.action_space.shape)
simple_env.step(action)
dummy_env.step(action)
for env in wrapped_envs:
env.step(action)
target_qpos_idxs = joint_qpos_ids_from_prefix(
simple_env.unwrapped.sim.model, "target:"
)
kept_indices = set(range(simple_env.unwrapped.sim.data.qpos.shape[0])) - set(
target_qpos_idxs
)
kept_indices = sorted(kept_indices)
def get_non_target_qpos(_env):
return np.array(_env.unwrapped.sim.data.qpos.copy()[kept_indices])
# Make sure the base env is deterministic
assert np.array_equal(
get_non_target_qpos(simple_env), get_non_target_qpos(dummy_env)
)
for env in wrapped_envs:
diffs = np.absolute(get_non_target_qpos(simple_env) - get_non_target_qpos(env))
assert np.max(diffs) > 1e-4, "failed for {}".format(env.__class__.__name__)
assert np.min(diffs) > 0.0, "failed for {}".format(env.__class__.__name__)
def test_randomize_obs_wrapper():
state = np.random.get_state()
try:
np.random.seed(1)
quat_noise_factor = QUAT_NOISE_CORRECTION
# test that randomization of Euler angles and quaternions has same distance
n = 10000
a_bias = 0.1
additive_bias = a_bias * np.random.standard_normal(size=(n, 3))
# multiplicative bias does not make sense for random angles
angle = np.random.uniform(-np.pi, np.pi, size=(n, 3))
new_angle = angle + additive_bias
angle_dist = np.linalg.norm(rotation.subtract_euler(new_angle, angle), axis=-1)
angle = np.random.uniform(-np.pi, np.pi, size=(n, 1))
axis = np.random.uniform(-1.0, 1.0, size=(n, 3))
quat = rotation.quat_from_angle_and_axis(angle, axis)
# double the additive bias to roughly equal the angular distance
noise_angle = a_bias * quat_noise_factor * np.random.standard_normal(size=(n,))
noise_axis = np.random.uniform(-1.0, 1.0, size=(n, 3))
noise_quat = rotation.quat_from_angle_and_axis(noise_angle, noise_axis)
new_quat = rotation.quat_mul(quat, noise_quat)
quat_diff = rotation.quat_difference(quat, new_quat)
quat_dist = rotation.quat_magnitude(quat_diff)
mean_angle_dist = np.mean(angle_dist)
#!/usr/bin/env python
"""
compare_ffs.py
For 2+ SDF files that are analogous in terms of molecules and their conformers,
assess them (e.g., having FF geometries) with respective to a reference SDF
file (e.g., having QM geometries). Metrics include: RMSD of conformers, TFD
(another geometric evaluation), and relative energy differences.
By: <NAME>
Version: Jan 10 2020
Examples:
python compare_ffs.py -i match.in -t 'SMILES QCArchive' --plot
python compare_ffs.py -i match.in -t 'SMILES QCArchive' --plot --molslice 25 26 3:5 6::3
"""
import os
import numpy as np
from scipy.interpolate import interpn
import pickle
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import openeye.oechem as oechem
import rdkit.Chem as Chem
from rdkit.Chem import TorsionFingerprints
import reader
### ------------------- Functions -------------------
def calc_tfd(ref_mol, query_mol, conf_id_tag):
"""
Calculate Torsion Fingerprint Deviation between two molecular structures.
RDKit is required for TFD calculation.
References
----------
Modified from the following code:
https://github.com/MobleyLab/off-ffcompare
TFD reference:
https://pubs.acs.org/doi/10.1021/ci2002318
Parameters
----------
ref_mol : OEMol
query_mol : OEMol
conf_id_tag : string
label of the SD tag that should be the same for matching conformers
in different files
Returns
-------
tfd : float
Torsion Fingerprint Deviation between ref and query molecules
"""
# convert refmol to one readable by RDKit
ref_rdmol = reader.rdmol_from_oemol(ref_mol)
# convert querymol to one readable by RDKit
que_rdmol = reader.rdmol_from_oemol(query_mol)
# check if the molecules are the same
# tfd requires the two molecules must be instances of the same molecule
rsmiles = Chem.MolToSmiles(ref_rdmol)
qsmiles = Chem.MolToSmiles(que_rdmol)
if rsmiles != qsmiles:
print(
f"- WARNING: The reference mol '{ref_mol.GetTitle()}' and "
f"query mol '{query_mol.GetTitle()}' do NOT have the same "
"SMILES strings as determined by RDKit MolToSmiles. It is "
"possible that they did not have matching SMILES even before "
"conversion from OEMol to RDKit mol. Listing in order the "
"QCArchive SMILES string, RDKit SMILES for ref mol, and "
"RDKit SMILES for query mol:"
f"\n {oechem.OEGetSDData(ref_mol, conf_id_tag)}"
f"\n {rsmiles}\n {qsmiles}"
)
tfd = np.nan
# calculate the TFD
else:
try:
tfd = TorsionFingerprints.GetTFDBetweenMolecules(ref_rdmol, que_rdmol)
# triggered for molecules such as urea
except IndexError:
print(
f"- Error calculating TFD on molecule '{ref_mol.GetTitle()}'."
" Possibly no non-terminal rotatable bonds found."
)
tfd = np.nan
return tfd
def compare_ffs(in_dict, conf_id_tag, out_prefix, keep_ref_conf=False, mol_slice=None):
"""
For 2+ SDF files that are analogous in terms of molecules and their
conformers, assess them by RMSD, TFD, and relative energy differences.
Parameters
----------
in_dict : OrderedDict
dictionary from input file, where key is method and value is dictionary
first entry should be reference method
in sub-dictionary, keys are 'sdfile' and 'sdtag'
conf_id_tag : string
label of the SD tag that should be the same for matching conformers
in different files
out_prefix : string
prefix appended to sdf file name to write out new SDF file
with RMSD and TFD info added as SD tags
keep_ref_conf : Boolean
True to keep reference conformer energy for each molecule
False to remove reference conformer energy;
note that qm ref conf defines where dE=0 for a certain molecule
so if qm ref conf is same as ff ref conf, ddE=0 may be inflated;
qm ref conf may or may not be the same as ff ref conf;
ref conf data also removed for RMSD/TFD data (for scatter plots)
mol_slice : numpy slice object
The resulting integers are numerically sorted and duplicates removed.
e.g., slices = np.s_[0, 3:5, 6::3] would be parsed to return
[0, 3, 4, 6, 9, 12, 15, 18, ...]
Can also parse from end: [-3:] gets the last 3 molecules, and
[-2:-1] is the same as [-2] to get just next to last molecule.
Returns
-------
enes_full : 3D list
enes_full[i][j][k] = ddE of ith method, jth mol, kth conformer.
ddE = (dE of query method) - (dE of ref method),
where the dE is computed as conformer M - conformer N,
and conformer N is chosen from the lowest energy of the ref confs.
the reference method is not present; i.e., self-comparison is skipped,
so the max i value represents total number of files minus one.
rmsds_full : 3D list
same format as that of enes_full but with conformer RMSDs
tfds_full : 3D list
same format as that of enes_full but with conformer TFDs
smiles_full : 3D list
same format as that of enes_full but with conformer SMILES strings
"""
# set RMSD calculation parameters
automorph = True # take into acct symmetry related transformations
heavyOnly = False # do consider hydrogen atoms for automorphisms
overlay = True # find the lowest possible RMSD
# initiate final data lists
enes_full = []
rmsds_full = []
tfds_full = []
smiles_full = []
# get first filename representing the reference geometries
sdf_ref = list(in_dict.values())[0]["sdfile"]
tag_ref = list(in_dict.values())[0]["sdtag"]
# assess each file against reference
for ff_label, ff_dict in in_dict.items():
# get details of queried file
sdf_que = ff_dict["sdfile"]
tag_que = ff_dict["sdtag"]
if sdf_que == sdf_ref:
continue
# initiate new sublists
enes_method = []
rmsds_method = []
tfds_method = []
smiles_method = []
# open an output file to store query molecules with new SD tags
out_file = f"{out_prefix}_{os.path.basename(sdf_que)}"
ofs = oechem.oemolostream()
if not ofs.open(out_file):
oechem.OEThrow.Fatal(f"Unable to open {out_file} for writing")
# load molecules from open reference and query files
print(f"\n\nOpening reference file {sdf_ref}")
mols_ref = reader.read_mols(sdf_ref, mol_slice)
print(f"Opening query file {sdf_que} for [ {ff_label} ] energies")
mols_que = reader.read_mols(sdf_que, mol_slice)
# loop over each molecule in reference and query files
for rmol, qmol in zip(mols_ref, mols_que):
# initial check that they have same title and number of confs
rmol_name = rmol.GetTitle()
rmol_nconfs = rmol.NumConfs()
if (rmol_name != qmol.GetTitle()) or (rmol_nconfs != qmol.NumConfs()):
raise ValueError(
"ERROR: Molecules not aligned in iteration. "
"Offending molecules and number of conformers:\n"
f"'{rmol_name}': {rmol_nconfs} nconfs\n"
f"'{qmol.GetTitle()}': {qmol.NumConfs()} nconfs"
)
# initialize lists to store conformer energies
enes_ref = []
enes_que = []
rmsds_mol = []
tfds_mol = []
smiles_mol = []
# loop over each conformer of this mol
for ref_conf, que_conf in zip(rmol.GetConfs(), qmol.GetConfs()):
# check confomer match from the specified tag
ref_id = oechem.OEGetSDData(ref_conf, conf_id_tag)
que_id = oechem.OEGetSDData(que_conf, conf_id_tag)
if ref_id != que_id:
raise ValueError(
"ERROR: Conformers not aligned in iteration"
f" for mol: '{rmol_name}'. The conformer "
f"IDs ({conf_id_tag}) for ref and query are:"
f"\n{ref_id}\n{que_id}."
)
# note the smiles id
smiles_mol.append(ref_id)
# get energies
enes_ref.append(float(oechem.OEGetSDData(ref_conf, tag_ref)))
enes_que.append(float(oechem.OEGetSDData(que_conf, tag_que)))
# compute RMSD between reference and query conformers
rmsd = oechem.OERMSD(ref_conf, que_conf, automorph, heavyOnly, overlay)
rmsds_mol.append(rmsd)
# compute TFD between reference and query conformers
tfd = calc_tfd(ref_conf, que_conf, conf_id_tag)
tfds_mol.append(tfd)
# store data in SD tags for query conf, and write conf to file
oechem.OEAddSDData(que_conf, f"RMSD to {sdf_ref}", str(rmsd))
oechem.OEAddSDData(que_conf, f"TFD to {sdf_ref}", str(tfd))
oechem.OEWriteConstMolecule(ofs, que_conf)
# compute relative energies against lowest E reference conformer
lowest_ref_idx = enes_ref.index(min(enes_ref))
rel_enes_ref = np.array(enes_ref) - enes_ref[lowest_ref_idx]
rel_enes_que = np.array(enes_que) - enes_que[lowest_ref_idx]
# remove the reference conformer of dE = 0
if not keep_ref_conf:
rel_enes_ref = np.delete(rel_enes_ref, lowest_ref_idx)
rel_enes_que = np.delete(rel_enes_que, lowest_ref_idx)
rmsds_mol.pop(lowest_ref_idx)
tfds_mol.pop(lowest_ref_idx)
smiles_mol.pop(lowest_ref_idx)
# subtract them to get ddE = dE (query method) - dE (ref method)
enes_mol = np.array(rel_enes_que) - np.array(rel_enes_ref)
# store then move on
enes_method.append(enes_mol)
rmsds_method.append(np.array(rmsds_mol))
tfds_method.append(np.array(tfds_mol))
smiles_method.append(smiles_mol)
# print(rmsds_method, len(rmsds_method))
# print(enes_method, len(enes_method))
enes_full.append(enes_method)
rmsds_full.append(rmsds_method)
tfds_full.append(tfds_method)
smiles_full.append(smiles_method)
ofs.close()
return enes_full, rmsds_full, tfds_full, smiles_full
def flatten(list_of_lists):
"""
Flatten one level of nesting.
Parameter
---------
list_of_lists
Returns
-------
1D numpy array
"""
return np.concatenate(list_of_lists).ravel()
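# e.g. flatten([np.array([1.0, 2.0]), np.array([3.0])]) -> array([1., 2., 3.])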
def draw_scatter(
x_data, y_data, method_labels, x_label, y_label, out_file, what_for="talk"
):
"""
Draw scatter plot, such as of (ddE vs RMSD) or (ddE vs TFD).
Parameters
----------
x_data : list of lists
x_data[i][j] represents ith method and jth molecular structure
y_data : list of lists
should have same shape and correspond to x_data
method_labels : list
list of all the method names including reference method first
x_label : string
name of the x-axis label
y_label : string
name of the y-axis label
out_file : string
name of the output file
what_for : string
dictates figure size, text size of axis labels, legend, etc.
"paper" or "talk"
"""
print(f"\nNumber of data points in full scatter plot: {len(flatten(x_data))}")
markers = ["o", "^", "d", "x", "s", "p", "P", "3", ">"]
num_methods = len(x_data)
plist = []
for i in range(num_methods):
p = plt.scatter(
x_data[i],
y_data[i],
marker=markers[i],
label=method_labels[i + 1],
alpha=0.6,
)
plist.append(p)
if what_for == "paper":
fig = plt.gcf()
fig.set_size_inches(4, 3)
plt.subplots_adjust(left=0.16, right=0.72, top=0.9, bottom=0.2)
plt.xlabel(x_label, fontsize=10)
plt.ylabel(y_label, fontsize=10)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.legend(loc=(1.04, 0.4), fontsize=10)
# make the marker size smaller
for p in plist:
p.set_sizes([8.0])
elif what_for == "talk":
plt.xlabel(x_label, fontsize=14)
plt.ylabel(y_label, fontsize=14)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.legend(loc=(1.04, 0.5), fontsize=12)
# make the marker size smaller
for p in plist:
p.set_sizes([4.0])
# set log scaling but use symmetric log for negative values
# plt.yscale('symlog')
plt.savefig(out_file, bbox_inches="tight")
plt.clf()
# plt.show()
def draw_corr(
x_data, y_data, method_labels, x_label, y_label, out_file, what_for="talk"
):
"""
Draw scatter plot, such as of (ddE vs RMSD) or (ddE vs TFD).
Parameters
----------
x_data : list of lists
x_data[i][j] represents ith method and jth molecular structure
y_data : list of lists
should have same shape and correspond to x_data
method_labels : list
list of all the method names including reference method first
x_label : string
name of the x-axis label
y_label : string
name of the y-axis label
out_file : string
name of the output file
what_for : string
dictates figure size, text size of axis labels, legend, etc.
"paper" or "talk"
"""
print(f"\nNumber of data points in full scatter plot: {len(flatten(x_data))}")
markers = ["o", "^", "d", "x", "s", "p", "P", "3", ">"]
num_methods = len(x_data)
plist = []
for i in range(num_methods):
p = plt.scatter(
x_data[i],
y_data[i],
marker=markers[i],
label=method_labels[i + 1],
alpha=0.6,
)
plist.append(p)
if what_for == "paper":
fig = plt.gcf()
fig.set_size_inches(5, 4)
plt.subplots_adjust(left=0.16, right=0.72, top=0.9, bottom=0.2)
plt.xlabel(x_label, fontsize=10)
plt.ylabel(y_label, fontsize=10)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.legend(loc=(1.04, 0.4), fontsize=10)
# make the marker size smaller
for p in plist:
p.set_sizes([8.0])
elif what_for == "talk":
plt.xlabel(x_label, fontsize=14)
plt.ylabel(y_label, fontsize=14)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.legend(loc=(1.04, 0.5), fontsize=12)
# make the marker size smaller
for p in plist:
p.set_sizes([4.0])
plt.savefig(out_file, bbox_inches="tight")
plt.clf()
# plt.show()
def draw_ridgeplot(
mydata,
method_labels,
x_label,
out_file,
what_for="paper",
bw="scott",
same_subplot=False,
sym_log=False,
hist_range=(-15, 15),
):
"""
Draw ridge plot of data (to which kernel density estimate is applied)
segregated by each method (representing a different color/level).
Modified from the following code:
https://seaborn.pydata.org/examples/kde_ridgeplot.html
Parameters
----------
mydata : list of lists
mydata[i][j] represents ith method and jth molecular structure
method_labels : list
list of all the method names including reference method first
x_label : string
name of the x-axis label
also used for pandas dataframe column name
out_file : string
name of the output file
what_for : string
dictates figure size, text size of axis labels, legend, etc.
"paper" or "talk"
bw : string or float
        defines the bandwidth for the KDE as passed to seaborn.kdeplot, or
        skips the KDE entirely and histograms the data instead;
        options: 'scott' (KDE), 'silverman' (KDE), scalar value (KDE), 'hist'
same_subplot : Boolean
False is default to have separate and slightly overlapping plots,
True to plot all of them showing on the same subplot (no fill)
sym_log : Boolean
False is default to plot density estimate as is,
True to plot x-axis on symmetric log scale
hist_range : tuple
tuple of min and max values to use for histogram;
only needed if bw is set to 'hist'
"""
# Define and use a simple function to label the plot in axes coordinates
def label(x, color, label):
ax = plt.gca()
# set axis font size
if what_for == "paper":
fs = 14
elif what_for == "talk":
fs = 14
ax.text(
0,
0.2,
label,
fontweight="bold",
color=color,
fontsize=fs,
ha="left",
va="center",
transform=ax.transAxes,
)
if what_for == "paper":
ridgedict = {
"h": 0.9,
"lw": 2.0,
"vl": 1.0,
"xfontsize": 14,
}
elif what_for == "talk":
ridgedict = {
"h": 2.0,
"lw": 3.0,
"vl": 1.0,
"xfontsize": 16,
}
num_methods = len(mydata)
# Initialize the FacetGrid object
my_cmap = "tab10"
sns.palplot(sns.color_palette(my_cmap))
colors = sns.color_palette(my_cmap)
# convert data to dataframes for ridge plot
temp = []
for i in range(num_methods):
df = pd.DataFrame(mydata[i], columns=[x_label])
df["method"] = method_labels[i + 1]
temp.append(df)
# list of dataframes concatenated to single dataframe
df = pd.concat(temp, ignore_index=True)
# print(method_labels)
g = sns.FacetGrid(
df, row="method", hue="method", aspect=10, height=ridgedict["h"], palette=colors
)
if not same_subplot:
# draw filled-in densities
if bw == "hist":
histoptions = {
"histtype": "bar",
"alpha": 0.6,
"linewidth": ridgedict["lw"],
"range": hist_range,
"align": "mid",
}
g.map(
sns.distplot,
x_label,
hist=True,
kde=False,
bins=15,
norm_hist=True,
hist_kws=histoptions,
)
else:
g.map(
sns.kdeplot,
x_label,
clip_on=False,
shade=True,
alpha=0.5,
lw=ridgedict["lw"],
bw=bw,
)
# draw colored horizontal line below densities
g.map(plt.axhline, y=0, lw=ridgedict["lw"], clip_on=False)
else:
# draw black horizontal line below densities
plt.axhline(y=0, color="black")
# draw outline around densities; can also single outline color: color="k"
if bw == "hist":
histoptions = {
"histtype": "step",
"alpha": 1.0,
"linewidth": ridgedict["lw"],
"range": hist_range,
"align": "mid",
}
g.map(
sns.distplot,
x_label,
hist=True,
kde=False,
bins=15,
norm_hist=True,
hist_kws=histoptions,
)
else:
g.map(sns.kdeplot, x_label, clip_on=False, lw=ridgedict["lw"], bw=bw)
# draw a vertical line at x=0 for visual reference
g.map(plt.axvline, x=0, lw=ridgedict["vl"], ls="--", color="gray", clip_on=False)
# optional: add custom vertical line
# g.map(plt.axvline, x=0.12, lw=1, ls='--', color='gray', clip_on=False)
# add labels to each level
if not same_subplot:
g.map(label, x_label)
# else if single subplot, generate a custom legend
else:
cmap = mpl.cm.tab10
patches = []
n_ffs = len(method_labels) - 1
for i in range(n_ffs):
patches.append(
mpl.patches.Patch(
color=cmap(i/10),
label=method_labels[i + 1],
)
)
plt.legend(handles=patches, fontsize=ridgedict["xfontsize"] / 1.2)
# optional: set symmetric log scale on x-axis
if sym_log:
g.set(xscale="symlog")
# Set the subplots to overlap
if not same_subplot:
g.fig.subplots_adjust(hspace=-0.45)
else:
g.fig.subplots_adjust(hspace=-1.0)
# Remove axes details that don't play well with overlap
g.set_titles("")
# g.set(yticks=[])
g.despine(bottom=True) # , left=True)
# ax = plt.gca()
# ax.spines['left'].set_visible(True)
# ax.spines['left'].set_position('zero')
# ax.set_yticks([0.4])
if what_for == "paper":
plt.gcf().set_size_inches(7, 3)
elif what_for == "talk":
plt.gcf().set_size_inches(12, 9)
# adjust font sizes
plt.xlabel(x_label, fontsize=ridgedict["xfontsize"])
plt.ylabel("Density", fontsize=ridgedict["xfontsize"])
plt.xticks(fontsize=ridgedict["xfontsize"])
# save with transparency for overlapping plots
plt.savefig(out_file, transparent=True, bbox_inches="tight")
plt.clf()
# plt.show()
def draw_density2d(
x_data,
y_data,
title,
x_label,
y_label,
out_file,
what_for="talk",
bins=20,
x_range=None,
y_range=None,
z_range=None,
z_interp=True,
symlog=False,
):
"""
Draw a scatter plot colored smoothly to represent the 2D density.
Based on: https://stackoverflow.com/a/53865762/8397754
Parameters
----------
x_data : 1D list
represents x-axis data for all molecules of a given method
y_data : 1D list
should have same shape and correspond to x_data
title : string
title of the plot
x_label : string
name of the x-axis label
y_label : string
name of the y-axis label
out_file : string
name of the output file
what_for : string
dictates figure size, text size of axis labels, legend, etc.
"paper" or "talk"
bins : int
number of bins for np.histogram2d
x_range : tuple of two floats
min and max values of x-axis
y_range : tuple of two floats
min and max values of y-axis
z_range : tuple of two floats
min and max values of density for setting a uniform color bar;
these should be at or beyond the bounds of the min and max
z_interp : Boolean
True to smoothen the color scale for the scatter plot points;
False to plot 2d histograms colored by cells (no scatter plot)
symlog : Boolean
True to represent y-axis on (symmetric) log scale (linear
        between -1 and 1), False for linear y-scaling
"""
def colorbar_and_finish(labelsize, fname):
cb = plt.colorbar()
cb.ax.tick_params(labelsize=labelsize)
cb.ax.set_title("counts", size=labelsize)
plt.savefig(fname, bbox_inches="tight")
plt.clf()
# plt.show()
fig = plt.gcf()
if what_for == "paper":
ms = 1
size1 = 10
size2 = 10
fig.set_size_inches(4, 3)
elif what_for == "talk":
ms = 4
size1 = 14
size2 = 16
fig.set_size_inches(9, 6)
plt_options = {"s": ms, "cmap": "coolwarm_r"}
# label and adjust plot
plt.title(title, fontsize=size2)
plt.xlabel(x_label, fontsize=size2)
plt.ylabel(y_label, fontsize=size2)
plt.xticks(fontsize=size1)
plt.yticks(fontsize=size1)
if x_range is not None:
plt.xlim(x_range[0], x_range[1])
if y_range is not None:
plt.ylim(y_range[0], y_range[1])
# remove any nans from x_data, such as TFD score for urea-like mols
nan_inds = np.isnan(x_data)
x_data = x_data[~nan_inds]
y_data = y_data[~nan_inds]
print(f"\nNumber of data points in FF scatter plot: {len(x_data)}")
# compute histogram in 2d
data, x_e, y_e = np.histogram2d(x_data, y_data, bins=bins)
# plot colored 2d histogram if z_interp not specified
if not z_interp:
extent = [x_e[0], x_e[-1], y_e[0], y_e[-1]]
plt.imshow(
data.T,
extent=extent,
origin="lower",
aspect="auto",
cmap=plt_options["cmap"],
vmin=z_range[0],
vmax=z_range[1],
)
colorbar_and_finish(size1, out_file)
return
# smooth/interpolate data
z = interpn(
(0.5 * (x_e[1:] + x_e[:-1]), 0.5 * (y_e[1:] + y_e[:-1])),
data,
np.vstack([x_data, y_data]).T,
method="splinef2d",
bounds_error=False,
)
# sort the points by density, so that the densest points are plotted last
idx = z.argsort()
x, y, z = x_data[idx], y_data[idx], z[idx]
print(
f"{title} ranges of data in density plot:\n\t\tmin\t\tmax"
f"\nx\t{np.min(x):10.4f}\t{np.max(x):10.4f}"
f"\ny\t{np.min(y):10.4f}\t{np.max(y):10.4f}"
f"\nz\t{np.min(data):10.4f}\t{ | np.max(data) | numpy.max |
# coding: utf-8
# # Advanced Lane Finding Using OpenCV
# **In this project, I used OpenCV to write a software pipeline to identify the lane boundaries in a video from a front-facing camera on a car.**
# ## Pipeline architecture:
# - **Compute Camera Calibration.**
# - **Apply Distortion Correction**.
# - **Apply a Perspective Transform.**
# - **Create a Thresholded Binary Image.**
# - **Define the Image Processing Pipeline.**
# - **Detect Lane Lines.**
# - **Determine the Curvature of the Lane and Vehicle Position.**
# - **Visual display of the Lane Boundaries and Numerical Estimation of Lane Curvature and Vehicle Position.**
# - **Process Project Videos.**
#
# I'll explain each step in details below.
# #### Environement:
# - Ubuntu 16.04
# - Anaconda 5.0.1
# - Python 3.6.2
# - OpenCV 3.1.0
# In[1]:
# Importing Python libraries
import numpy as np
import cv2
import pickle
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
from ipywidgets import interact, interactive, fixed
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# In[2]:
def display(img1, img2, lbl1, lbl2, x, y, img3=[], lbl3=[], cmap=None, n = 2):
"""
Diplay the input images side-by-side.
Parameters:
img1: Input image #1.
img2: Input image #2.
lbl1: Label for input image #1.
lbl2: Label for input image #2.
x, y: Figure size.
cmap (Default = None): Used to display gray images.
"""
plt.figure(figsize=(x, y))
plt.subplot(1, n, 1)
plt.imshow(img1, cmap = cmap)
plt.xlabel(lbl1, fontsize=15)
plt.xticks([])
plt.yticks([])
plt.subplot(1, n, 2)
plt.imshow(img2, cmap = cmap)
plt.xlabel(lbl2, fontsize=15)
plt.xticks([])
plt.yticks([])
if n == 3:
plt.subplot(1, n, 3)
plt.imshow(img3, cmap = cmap)
plt.xlabel(lbl3, fontsize=15)
plt.xticks([])
plt.yticks([])
plt.show()
# ---
# ## Step 1: Compute Camera Calibration
# The OpenCV functions `cv2.findChessboardCorners()` and `cv2.drawChessboardCorners()` are used for image calibration. We have 20 images of a chessboard, located in `./camera_cal`, taken from different angles with the same camera, and we'll use them as input for camera calibration routine.
#
# `cv2.findChessboardCorners()` attempts to determine whether the input image is a view of the chessboard pattern and locate the internal chessboard corners, and then `cv2.drawChessboardCorners()` draws individual chessboard corners detected.
#
# Arrays of object points, corresponding to the location of internal corners of the chessboard, and image points, the pixel locations of the internal chessboard corners determined by `cv2.findChessboardCorners()`, are collected for each calibration image.
#
#
# These will then be used by the OpenCV `cv2.calibrateCamera()` to find the camera intrinsic and extrinsic parameters from several views of a calibration pattern. These parameters will be fed to `cv2.undistort` function to correct for distortion on any image produced by the same camera.
# In[5]:
cal_images = glob.glob('camera_cal/*.jpg')
test_images = glob.glob('test_images/*.jpg')
nx, ny = 9, 6
objp = np.zeros((nx*ny,3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1, 2)
# In[6]:
def calibrate_camera(cal_images, nx, ny):
"""
Compute camera calibration and return the camera intrinsic and extrinsic parameters.
Parameters:
cal_images: A list of the chessboard calibration images.
nx, ny: Chessboard dimensions.
"""
objpoints = [] # 3D points
imgpoints = [] # 2D points
objp = np.zeros((nx*ny,3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1, 2)
for file in cal_images:
img = cv2.imread(file)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
return mtx, dist
# In[7]:
mtx, dist = calibrate_camera(cal_images, nx, ny)
# ---
# ## Step 2: Apply Distortion Correction
# OpenCV provides `cv2.undistort` function, which transforms an image to compensate for radial and tangential lens distortion.
# In[8]:
def undistort(img, mtx, dist):
"""
Use the camera calibration parameters to correct the input image for distortion.
Parameters:
img: Input image.
mtx: Output floating-point camera matrix.
dist: Output vector of distortion coefficients.
"""
undist = cv2.undistort(img, mtx, dist, None, mtx)
return undist
# In[9]:
# Testing distortion correction on cal_images
img = cv2.imread(cal_images[0])
undist = undistort(img, mtx, dist)
display(img, undist, 'Original image', 'Distortion corrected image', 14, 7)
# In[10]:
# Testing distortion correction on test_images
img = cv2.cvtColor(cv2.imread(test_images[6]), cv2.COLOR_BGR2RGB)
undist_img_ex = undistort(img, mtx, dist)
display(img, undist_img_ex, 'Original image', 'Distortion corrected image', 14, 7)
# The effect of `undistort` is particularly noticeable, by the change in shape of the car hood at the bottom corners of the image.
# ---
# ## Step 3: Apply a Perspective Transform
# A common task in autonomous driving is to convert the vehicle’s camera view of the scene into a top-down “bird’s-eye” view. We'll use OpenCV's `cv2.getPerspectiveTransform()` and `cv2.warpPerspective()` to do this task.
# In[11]:
image_shape = undist_img_ex.shape
print("Image shape:", image_shape)
plt.imshow(undist_img_ex)
plt.show()
# In[12]:
# Define the region of interest
src = np.float32([[190, 700], [1110, 700], [720, 470], [570, 470]])
bottom_left = src[0][0]+100, src[0][1]
bottom_right = src[1][0]-200, src[1][1]
top_left = src[3][0]-250, 1
top_right = src[2][0]+200, 1
dst = np.float32([bottom_left, bottom_right, top_right, top_left])
# In[13]:
def perspective_transform(img, src, dst):
"""
Convert the vehicle’s camera view of the scene into a top-down “bird’s-eye” view.
Parameters:
img: Input image.
src: Source points.
dst: Destination points.
"""
image_shape = img.shape
img_size = (image_shape[1], image_shape[0])
# Given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
# Warp the image using OpenCV warpPerspective()
warped = cv2.warpPerspective(img, M, img_size)
# Return the resulting image and matrix
return warped, M, Minv
# In[14]:
# Applying perspective transform to several test_images
display(undistort(cv2.cvtColor(cv2.imread(test_images[1]), cv2.COLOR_BGR2RGB), mtx, dist),
perspective_transform(undistort(cv2.cvtColor(cv2.imread(test_images[1]), cv2.COLOR_BGR2RGB),
mtx, dist), src, dst)[0],
'Original image', 'Warped image', 14, 7)
display(undistort(cv2.cvtColor(cv2.imread(test_images[7]), cv2.COLOR_BGR2RGB), mtx, dist),
perspective_transform(undistort(cv2.cvtColor(cv2.imread(test_images[7]), cv2.COLOR_BGR2RGB),
mtx, dist), src, dst)[0],
'Original image', 'Warped image', 14, 7)
display(undistort(cv2.cvtColor(cv2.imread(test_images[6]), cv2.COLOR_BGR2RGB), mtx, dist),
perspective_transform(undistort(cv2.cvtColor(cv2.imread(test_images[6]), cv2.COLOR_BGR2RGB),
mtx, dist), src, dst)[0],
'Original image', 'Warped image', 14, 7)
# In[15]:
undist_example_warped = perspective_transform(undist_img_ex, src, dst)[0]
# ---
# ## Step 4: Create a Thresholded Binary Image
# Now, we will use color transform and Sobel differentiation to detect the lane lines in the image.
# ### Exploring different color spaces
# #### RGB color space:
# In[16]:
undist_example_RGB = undist_example_warped
undist_example_R = undist_example_RGB[:,:,0]
undist_example_G = undist_example_RGB[:,:,1]
undist_example_B = undist_example_RGB[:,:,2]
display(undist_example_RGB, undist_example_R, 'Original RGB image', 'RGB R-Channel', 14, 7)
display(undist_example_G, undist_example_B, 'RGB G-Channel', 'RGB B-Channel', 14, 7)
# #### HSV color space:
# This type of color model closely emulates models of human color perception. While in other color models, such as RGB, an image is treated as an additive result of three base colors, the three channels of HSV represent hue (H gives a measure of the spectral composition of a color), saturation (S gives the proportion of pure light of the dominant wavelength, which indicates how far a color is from a gray of equal brightness), and value (V gives the brightness relative to
# the brightness of a similarly illuminated white color) corresponding to the intuitive appeal of tint, shade, and tone.
# In[17]:
undist_example_HSV = cv2.cvtColor(undist_example_RGB, cv2.COLOR_RGB2HSV)
undist_example_HSV_H = undist_example_HSV[:,:,0]
undist_example_HSV_S = undist_example_HSV[:,:,1]
undist_example_HSV_V = undist_example_HSV[:,:,2]
display(undist_example_HSV, undist_example_HSV_H, 'Original HSV image', 'HSV H-Channel', 14, 7)
display(undist_example_HSV_S, undist_example_HSV_V, 'HSV S-Channel', 'HSV V-Channel', 14, 7)
# #### LAB color space:
# The Lab color space describes mathematically all perceivable colors in the three dimensions L for lightness and a and b for the color opponents green–red and blue–yellow.
# In[18]:
undist_example_LAB = cv2.cvtColor(undist_example_RGB, cv2.COLOR_RGB2Lab)
undist_example_LAB_L = undist_example_LAB[:,:,0]
undist_example_LAB_A = undist_example_LAB[:,:,1]
undist_example_LAB_B = undist_example_LAB[:,:,2]
display(undist_example_LAB, undist_example_LAB_L, 'Original LAB image', 'LAB L-Channel', 14, 7)
display(undist_example_LAB_A, undist_example_LAB_B, 'LAB A-Channel', 'LAB B-Channel', 14, 7)
# #### HLS color space:
# This model was developed to specify the values of hue, lightness, and saturation of a color in each channel. The difference with respect to the HSV color model is that the lightness of a pure color defined by HLS is equal to the lightness of a medium gray, while the brightness of a pure color defined by HSV is equal to the brightness of white.
# In[19]:
undist_example_HLS = cv2.cvtColor(undist_example_RGB, cv2.COLOR_RGB2HLS)
undist_example_HLS_H = undist_example_HLS[:,:,0]
undist_example_HLS_L = undist_example_HLS[:,:,1]
undist_example_HLS_S = undist_example_HLS[:,:,2]
display(undist_example_HLS, undist_example_HLS_H, 'Original HLS image', 'HLS H-Channel', 14, 7)
display(undist_example_HLS_L, undist_example_HLS_S, 'HLS L-Channel', 'HLS S-Channel', 14, 7)
# ### Color Space Thresholding
# As you may observe, the white lane lines are clearly highlighted in the L-channel of the HLS color space, and the yellow lines are clear in the B-channel of the LAB color space as well. We'll apply an HLS L-threshold and a LAB B-threshold to the image to highlight the lane lines.
# In[20]:
def hls_l_thresh(img, thresh=(220, 255)):
"""
Threshold the input image to the L-channel of the HLS color space.
Parameters:
img: HLS image.
thresh: Minimum and Maximum color intensity.
"""
img = img[:,:,1]
img = img*(255/np.max(img))
binary_output = np.zeros_like(img)
binary_output[(img > thresh[0]) & (img <= thresh[1])] = 1
return binary_output
# In[21]:
thresh_HLS = hls_l_thresh(undist_example_HLS)
display(undist_example_HLS, thresh_HLS, 'HLS image', 'L-thresholded HLS image', 14, 7, cmap = 'gray')
# In[22]:
def lab_b_thresh(img, thresh=(190, 255)):
"""
Threshold the input image to the B-channel of the LAB color space.
Parameters:
img: LAB image.
thresh: Minimum and Maximum color intensity.
"""
img = img[:,:,2]
if np.max(img) > 175:
img = img*(255/np.max(img))
binary_output = np.zeros_like(img)
binary_output[(img > thresh[0]) & (img <= thresh[1])] = 1
return binary_output
# In[23]:
thresh_LAB = lab_b_thresh(undist_example_LAB)
display(undist_example_LAB, thresh_LAB, 'LAB image', 'B-thresholded LAB image', 14, 7, cmap = 'gray')
# In[24]:
def threshold_color_space(img):
"""
Threshold the input image to the L-channel of the HLS color space and the B-channel of the LAB color space.
Parameters:
img: Input image.
"""
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_LAB = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
img_thresh_HLS = hls_l_thresh(img_HLS)
img_thresh_LAB = lab_b_thresh(img_LAB)
combined_img = np.zeros_like(img_thresh_HLS)
combined_img[((img_thresh_HLS == 1) | (img_thresh_LAB == 1))] = 1
return combined_img
# In[25]:
threshold_color_img = threshold_color_space(undist_example_warped)
display(undist_example_warped, threshold_color_img, 'RGB image', 'Thresholded image', 14, 7, cmap = 'gray')
# ### Sobel Differentiation
# Now, we'll explore different Sobel differentiation techniques, and try to come up with a combination that produces a better output than color space thresholding.
# In[26]:
def abs_sobel(img, orient='x', sobel_kernel=3, thresh=(25, 255)):
"""
    Apply absolute Sobel differentiation to the input image.
    Parameters:
        img: Input image.
        orient (Default = x): Gradient direction.
        sobel_kernel (Default = 3): Size of the extended Sobel kernel.
        thresh (Default = (25, 255)): Minimum and Maximum gradient strength.
"""
    sobel = cv2.Sobel(img, cv2.CV_64F, orient=='x', orient=='y', ksize=sobel_kernel)
abs_sobel = np.absolute(sobel)
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return sxbinary
# In[27]:
abs_sobel_example_LAB_B = abs_sobel(undist_example_LAB_B)
display(undist_example_LAB_B, abs_sobel_example_LAB_B, 'LAB B-Channel', 'After absolute Sobel', 14, 7, cmap='gray')
# In[28]:
abs_sobel_example_HLS_L = abs_sobel(undist_example_HLS_L)
display(undist_example_HLS_L, abs_sobel_example_HLS_L, 'HLS L-Channel', 'After absolute Sobel', 14, 7, cmap='gray')
# In[29]:
def mag_sobel(img, sobel_kernel=15, thresh=(25, 255)):
"""
    Apply magnitude Sobel differentiation to the input image.
Parameters:
img: Input image.
sobel_kernel (Default = 15): Size of the extended Sobel kernel.
thresh (Default = (25, 255)): Minimum and Maximum gradient strength.
"""
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
mag_sobel = np.sqrt(np.square(sobelx) + np.square(sobely))
    scaled_sobel = np.uint8(255*mag_sobel/np.max(mag_sobel))
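    # NOTE: the source is truncated here; inferred completion, thresholding the
    # scaled gradient magnitude exactly as in abs_sobel() above:
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
    return sxbinary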
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from astropy.io import ascii
from uncertainties import ufloat
import uncertainties.unumpy as unp
# so far: calculation of the speed of sound from our measurement with v_0 and the determined lambda
# in addition: import of all the data via the Holz method; the arrays for the
# differences are each finished and can now be plotted against the gear
# (possibly also against the determined velocity v of the cart)
# placeholder for the plots, here simply the arrays
Gang = np.linspace(1, 10, 10)
# insert for forward and reverse gears
def nomvalues_array(array):
List = list()
for i in range(len(array)):
List.append(array[i].nominal_value)
array_noms = np.asarray(List)
return array_noms
# short helper for generating arrays of nominal values
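# note: uncertainties.unumpy also offers unp.nominal_values(array) for this;
# e.g. nomvalues_array(np.array([ufloat(1.0, 0.1), ufloat(2.0, 0.2)])) -> array([1., 2.])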
# a)
puls = np.genfromtxt(
"Messdaten/adrianundclemens/adrianclemens_messunga.txt", unpack="True")
n6h = ufloat(np.mean(puls[0:5]), np.std(puls[0:5]) / np.sqrt(5))
n6z = ufloat(np.mean(puls[5:10]), np.std(puls[5:10]) / np.sqrt(5))
n12h = ufloat(np.mean(puls[10:15]), np.std(puls[10:15]) / np.sqrt(5))
n12z = ufloat(np.mean(puls[15:20]), np.std(puls[15:20]) / np.sqrt(5))
n18h = ufloat(np.mean(puls[20:25]), np.std(puls[20:25]) / np.sqrt(5))
n18z = ufloat(np.mean(puls[25:30]), np.std(puls[25:30]) / np.sqrt(5))
n24h = ufloat(np.mean(puls[30:35]), np.std(puls[30:35]) / np.sqrt(5))
n24z = ufloat(np.mean(puls[35:40]), np.std(puls[35:40]) / np.sqrt(5))
n30h = ufloat(np.mean(puls[40:45]), np.std(puls[40:45]) / np.sqrt(5))
n30z = ufloat(np.mean(puls[45:50]), np.std(puls[45:50]) / np.sqrt(5))
n36h = ufloat(np.mean(puls[50:55]), np.std(puls[50:55]) / np.sqrt(5))
n36z = ufloat(np.mean(puls[55:60]), np.std(puls[55:60]) / np.sqrt(5))
n42h = ufloat(np.mean(puls[60:65]), np.std(puls[60:65]) / np.sqrt(5))
"""General functions for mathematical and numerical operations.
Functions
---------
- confidence_bands - Bin by `xx` to calculate confidence intervals in `yy`.
- confidence_intervals - Compute the values bounding desired confidence intervals.
- cumstats - Calculate a cumulative average and standard deviation.
- log_normal_base_10 -
- percentiles -
- stats - Get basic statistics for the given array.
- stats_str - Return a string with the statistics of the given array.
- sigma - Convert from standard deviation to percentiles.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import warnings
import numpy as np
import scipy as sp
import scipy.stats # noqa
from zcode import utils
from zcode.math import math_core
__all__ = [
'confidence_bands', 'confidence_intervals',
'cumstats', 'frac_str', 'info', 'log_normal_base_10', 'mean',
'percs_from_sigma', 'quantiles', 'random_power', 'sigma',
'stats', 'stats_str', 'std',
'LH_Sampler',
# DEPRECATED
'percentiles'
]
def confidence_bands(xx, yy, xbins=10, xscale='lin', percs=[0.68, 0.95], filter=None):
"""Bin the given data with respect to `xx` and calculate confidence intervals in `yy`.
Arguments
---------
xx : array_like scalars
Data values for the axis by which to bin.
yy : array_like scalars
Data values for the axis in which to calculate confidence intervals, with values
corresponding to each of the `xx` values. Must have the same number of elements
as `xx`.
xbins : int or array_like of scalar
Specification for bins in `xx`. Either a
* int, describing the number of bins `N` to create automatically with scale `xscale`.
* array_like scalar, describing the `N+1` edges of each bin (left and right).
xscale : str
Specification of xbin scaling if bins are to be calculated automatically, {'lin', 'log'}.
Ignored if bin edges are given explicitly to `xbins`.
confInt : scalar or array_like of scalar
The percentage confidence intervals to calculate (e.g. 0.5 for median).
Must be between {0.0, 1.0}.
filter : str or `None`
Returns
-------
(for number of bins `N`)
count : (N,) array of int
The number of points in each xbin.
med : (N,) array of float
The median value of points in each bin
conf : array or ndarray of float
Values describing the confidence intervals.
If a single `confInt` is given, this will have shape (N,2);
If `M` `confInt` values are given, this will have shape (N,M,2)
Where in each case the 0th and 1st element of the last dimension is the lower and upper
confidence bounds respectively.
xbins : (N+1,) array of float
Location of bin edges.
"""
squeeze = False
if not np.iterable(percs):
squeeze = True
percs = [percs]
xx = np.asarray(xx).flatten()
yy = np.asarray(yy).flatten()
if xx.shape != yy.shape:
errStr = "Shapes of `xx` and `yy` must match ('{}' vs. '{}'."
errStr = errStr.format(str(xx.shape), str(yy.shape))
raise ValueError(errStr)
# Filter based on whether `yy` values match `filter` comparison to 0.0
if filter is not None:
compFunc = math_core._comparison_function(filter)
inds = compFunc(yy, 0.0)
xx = xx[inds]
yy = yy[inds]
# Create bins
xbins = math_core.asBinEdges(xbins, xx, scale=xscale)
nbins = xbins.size - 1
# Find the entries corresponding to each bin
groups = math_core.groupDigitized(xx, xbins[1:], edges='right')
# Allocate storage for results
med = np.zeros(nbins)
conf = np.zeros((nbins, np.size(percs), 2))
count = np.zeros(nbins, dtype=int)
# Calculate medians and confidence intervals
for ii, gg in enumerate(groups):
count[ii] = np.size(gg)
if count[ii] == 0: continue
mm, cc = confidence_intervals(yy[gg], percs=percs)
med[ii] = mm
conf[ii, ...] = cc[...]
if squeeze:
conf = conf.squeeze()
return count, med, conf, xbins
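# Example usage (a minimal sketch; `xx` and `yy` are hypothetical 1D data):
#     count, med, conf, edges = confidence_bands(xx, yy, xbins=20, percs=[0.68, 0.95])
#     # count.shape == (20,), med.shape == (20,), conf.shape == (20, 2, 2)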
def confidence_intervals(vals, sigma=None, percs=None, weights=None, axis=None,
filter=None, return_ci=False,
# DEPRECATED ARGUMENTS:
ci=None):
"""Compute the values bounding the target confidence intervals for an array of data.
Arguments
---------
vals : array_like of scalars
Data over which to calculate confidence intervals.
This can be an arbitrarily shaped ndarray.
sigma : (M,) array_like of float
Confidence values as standard-deviations, converted to percentiles.
percs : (M,) array_like of floats
List of desired confidence intervals as fractions (e.g. `[0.68, 0.95]`)
axis : int or None
Axis over which to calculate confidence intervals, or 'None' to marginalize over all axes.
filter : str or `None`
Filter the input array with a boolean comparison to zero.
If no values remain after filtering, ``NaN, NaN`` is returned.
return_ci : bool
Return the confidence-interval values used (i.e. percentiles)
ci : DEPRECATED, use `percs` instead
Returns
-------
med : scalar
Median of the input data.
`None` if there are no values (e.g. after filtering).
conf : ([L, ]M, 2) ndarray of scalar
Bounds for each confidence interval. Shape depends on the number of confidence intervals
passed in `percs`, and the input shape of `vals`.
`None` if there are no values (e.g. after filtering).
If `vals` is 1D or `axis` is 'None', then the output shape will be (M, 2).
If `vals` has more than one-dimension, and `axis` is not 'None', then the shape `L`
will be the shape of `vals`, minus the `axis` axis.
For example,
if ``vals.shape = (4,3,5)` and `axis=1`, then `L = (4,5)`
the final output shape will be: (4,5,M,2).
percs : (M,) ndarray of float, optional
The percentile-values used for calculating confidence intervals.
Only returned if `return_ci` is True.
"""
percs = utils.dep_warn_var("ci", ci, "percs", percs)
if percs is not None and sigma is not None:
raise ValueError("Only provide *either* `percs` or `sigma`!")
if percs is None:
if sigma is None:
sigma = [1.0, 2.0, 3.0]
percs = percs_from_sigma(sigma)
percs = np.atleast_1d(percs)
    if np.any(percs < 0.0) or np.any(percs > 1.0):
raise ValueError("`percs` must be [0.0, 1.0]! {}".format(stats_str(percs)))
# PERC_FUNC = np.percentile
def PERC_FUNC(xx, pp, **kwargs):
return quantiles(xx, pp/100.0, weights=weights, **kwargs)
# Filter input values
if filter is not None:
# Using the filter will flatten the array, so `axis` wont work...
kw = {}
if (axis is not None) and np.ndim(vals) > 1:
kw['axis'] = axis
if weights is not None:
raise NotImplementedError("`weights` argument does not work with `filter`!")
vals = math_core.comparison_filter(vals, filter, mask=True) # , **kw)
# vals = np.ma.filled(vals, np.nan)
# PERC_FUNC = np.nanpercentile # noqa
if vals.size == 0:
return np.nan, np.nan
# Calculate confidence-intervals and median
cdf_vals = np.array([(1.0-percs)/2.0, (1.0+percs)/2.0]).T
# This produces an ndarray with shape `[M, 2(, L)]`
# If ``axis is None`` or `np.ndim(vals) == 1` then the shape will be simply `[M, 2]`
# Otherwise, `L` will be the shape of `vals` without axis `axis`.
conf = [[PERC_FUNC(vals, 100.0*cdf[0], axis=axis),
PERC_FUNC(vals, 100.0*cdf[1], axis=axis)]
for cdf in cdf_vals]
conf = np.array(conf)
# Reshape from `[M, 2, L]` to `[L, M, 2]`
if (np.ndim(vals) > 1) and (axis is not None):
conf = np.moveaxis(conf, -1, 0)
med = PERC_FUNC(vals, 50.0, axis=axis)
if len(conf) == 1:
conf = conf[0]
if return_ci:
return med, conf, percs
return med, conf
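# Example usage (a minimal sketch with hypothetical data):
#     rng = np.random.default_rng(0)
#     vals = rng.normal(size=(4, 3))
#     med, conf = confidence_intervals(vals, percs=[0.68, 0.95], axis=1)
#     # med.shape == (4,), conf.shape == (4, 2, 2): (row, interval, lower/upper)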
def cumstats(arr):
"""Calculate a cumulative average and standard deviation.
Arguments
---------
arr <flt>[N] : input array
Returns
-------
ave <flt>[N] : cumulative average over ``arr``
std <flt>[N] : cumulative standard deviation over ``arr``
"""
tot = len(arr)
num = np.arange(tot)
std = np.zeros(tot)
# Cumulative sum
sm1 = np.cumsum(arr)
# Cumulative sum of squares
sm2 = np.cumsum(np.square(arr))
# Cumulative average
ave = sm1/(num+1.0)
std[1:] = np.fabs(sm2[1:] - np.square(sm1[1:])/(num[1:]+1.0))/num[1:]
std[1:] = np.sqrt(std[1:])
return ave, std
def frac_str(num, den=None, frac_fmt=None, dec_fmt=None):
"""Create a string of the form '{}/{} = {}' for reporting fractional values.
"""
if den is None:
assert num.dtype == bool, "If no `den` is given, array must be boolean!"
den = num.size
num = np.count_nonzero(num)
try:
dec_frac = num / den
except ZeroDivisionError:
dec_frac = np.nan
if frac_fmt is None:
frac_exp = np.fabs(np.log10([num, den]))
if np.any(frac_exp >= 4):
frac_fmt = ".1e"
else:
frac_fmt = "d"
if dec_fmt is None:
dec_exp = np.fabs(np.log10(dec_frac))
if dec_exp > 3:
dec_fmt = ".3e"
else:
dec_fmt = ".4f"
fstr = "{num:{ff}}/{den:{ff}} = {frac:{df}}".format(
num=num, den=den, frac=dec_frac, ff=frac_fmt, df=dec_fmt)
return fstr
def info(array, shape=True, sample=3, stats=True):
rv = ""
if shape:
rv += "{} ".format(np.shape(array))
if (sample is not None) and (sample > 0):
rv += "{} ".format(math_core.str_array(array, sides=sample))
if stats:
rv += "{} ".format(stats_str(array, label=False))
return rv
def log_normal_base_10(mu, sigma, size=None, shift=0.0):
"""Draw from a lognormal distribution with values in base-10 (instead of e).
Arguments
---------
mu : (N,) scalar
Mean of the distribution in linear space (e.g. 1.0e8 instead of 8.0).
sigma : (N,) scalar
Variance of the distribution *in dex* (e.g. 1.0 means factor of 10.0 variance)
size : (M,) int
Desired size of sample.
Returns
-------
dist : (M,...) scalar
Resulting distribution of values (in linear space).
"""
_sigma = np.log(10**sigma)
dist = np.random.lognormal(np.log(mu) + shift*np.log(10.0), _sigma, size)
return dist
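# Example (a sketch): sample 1000 values around 1e8 with 0.5 dex scatter;
# np.std(np.log10(dist)) should come out near 0.5:
#     dist = log_normal_base_10(1.0e8, 0.5, size=1000)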
def mean(vals, weights=None, **kwargs):
if weights is None:
return np.mean(vals, **kwargs)
ave = np.sum(vals*weights, **kwargs) / np.sum(weights, **kwargs)
return ave
def percentiles(*args, **kwargs):
utils.dep_warn("percentiles", newname="quantiles")
return quantiles(*args, **kwargs)
def quantiles(values, percs=None, sigmas=None, weights=None, axis=None,
values_sorted=False, filter=None):
"""Compute weighted percentiles.
Copied from @Alleo answer: http://stackoverflow.com/a/29677616/230468
NOTE: if `values` is a masked array, then only unmasked values are used!
Arguments
---------
values: (N,)
input data
percs: (M,) scalar [0.0, 1.0]
Desired percentiles of the data.
weights: (N,) or `None`
Weighted for each input data point in `values`.
values_sorted: bool
If True, then input values are assumed to already be sorted.
Returns
-------
percs : (M,) float
Array of percentiles of the weighted input data.
"""
if filter is not None:
values = math_core.comparison_filter(values, filter)
if not isinstance(values, np.ma.MaskedArray):
values = np.asarray(values)
if percs is None:
percs = sp.stats.norm.cdf(sigmas)
if np.ndim(values) > 1:
if axis is None:
values = values.flatten()
else:
if axis is not None:
raise ValueError("Cannot act along axis '{}' for 1D data!".format(axis))
percs = np.array(percs)
if weights is None:
weights = np.ones_like(values)
weights = np.array(weights)
try:
weights = np.ma.masked_array(weights, mask=values.mask)
except AttributeError:
pass
assert np.all(percs >= 0.0) and np.all(percs <= 1.0), 'percentiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values, axis=axis)
values = np.take_along_axis(values, sorter, axis=axis)
weights = np.take_along_axis(weights, sorter, axis=axis)
if axis is None:
weighted_quantiles = np.cumsum(weights) - 0.5 * weights
weighted_quantiles /= np.sum(weights)
percs = np.interp(percs, weighted_quantiles, values)
return percs
weights = np.moveaxis(weights, axis, -1)
values = np.moveaxis(values, axis, -1)
weighted_quantiles = np.cumsum(weights, axis=-1) - 0.5 * weights
weighted_quantiles /= np.sum(weights, axis=-1)[..., np.newaxis]
# weighted_quantiles = np.moveaxis(weighted_quantiles, axis, -1)
percs = [np.interp(percs, weighted_quantiles[idx], values[idx])
for idx in np.ndindex(values.shape[:-1])]
percs = np.array(percs)
return percs
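# Example: weighted median of [1, 2, 3] with weights [1, 1, 4]
#     quantiles([1, 2, 3], percs=[0.5], weights=[1, 1, 4])  # -> array([2.6])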
def percs_from_sigma(sigma, side='in', boundaries=False):
"""Convert from standard deviation 'sigma' to percentiles in/out-side the normal distribution.
Arguments
---------
sig : (N,) array_like scalar
Standard deviations.
side : str, {'in', 'out'}
Calculate percentiles inside (i.e. [-sig, sig]) or ouside (i.e. [-inf, -sig] U [sig, inf])
boundaries : bool
Whether boundaries should be given ('True'), or the area ('False').
Returns
-------
vals : (N,) array_like scalar
Percentiles corresponding to the input `sig`.
"""
if side.startswith('in'):
inside = True
elif side.startswith('out'):
inside = False
else:
raise ValueError("`side` = '{}' must be {'in', 'out'}.".format(side))
# From CDF from -inf to `sig`
cdf = sp.stats.norm.cdf(sigma)
# Area outside of [-sig, sig]
vals = 2.0 * (1.0 - cdf)
# Convert to area inside [-sig, sig]
if inside:
vals = 1.0 - vals
# Convert from area to locations of boundaries (fractions)
if boundaries:
if inside:
vlo = 0.5*(1 - vals)
vhi = 0.5*(1 + vals)
else:
vlo = 0.5*vals
vhi = 1.0 - 0.5*vals
return vlo, vhi
return vals
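# Sanity check -- the 68-95-99.7 rule:
#     percs_from_sigma([1.0, 2.0, 3.0])       # -> ~[0.6827, 0.9545, 0.9973]
#     percs_from_sigma(1.0, side='out')       # -> ~0.3173
#     percs_from_sigma(1.0, boundaries=True)  # -> ~(0.1587, 0.8413)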
def random_power(extr, pdf_index, size=1, **kwargs):
"""Draw from power-law PDF with the given extrema and index.
FIX/BUG : negative `extr` values break `pdf_index=-1` !!
Arguments
---------
extr : array_like scalar
The minimum and maximum value of this array are used as extrema.
pdf_index : scalar
The power-law index of the PDF distribution to be drawn from. Any real number is valid,
positive or negative.
NOTE: the `numpy.random.power` function uses the power-law index of the CDF, i.e. `g+1`
size : scalar
The number of points to draw (cast to int).
**kwags : dict pairs
Additional arguments passed to `zcode.math_core.minmax` with `extr`.
Returns
-------
rv : (N,) scalar
Array of random variables with N=`size` (default, size=1).
"""
# if not np.isscalar(pdf_index):
# err = "`pdf_index` (shape {}; {}) must be a scalar value!".format(
# np.shape(pdf_index), pdf_index)
# raise ValueError(err)
extr = math_core.minmax(extr, **kwargs)
if pdf_index == -1:
rv = 10**np.random.uniform(*np.log10(extr), size=int(size))
else:
rr = np.random.random(size=int(size))
gex = extr ** (pdf_index+1)
rv = (gex[0] + (gex[1] - gex[0])*rr) ** (1./(pdf_index+1))
return rv
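# Example (a sketch): draw 3 values from p(x) ~ x^-2 on [1, 100]:
#     rv = random_power([1.0, 100.0], -2.0, size=3)
# For pdf_index = -1 the draw is log-uniform between the extrema.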
def sigma(*args, **kwargs):
# ---- DECPRECATION SECTION ----
utils.dep_warn("sigma", newname="percs_from_sigma")
# ------------------------------
return percs_from_sigma(*args, **kwargs)
def stats(vals, median=False):
"""Get basic statistics for the given array.
Arguments
---------
vals <flt>[N] : input array
median <bool> : include median in return values
Returns
-------
ave <flt>
std <flt>
[med <flt>] : median, returned if ``median`` is `True`
"""
ave = np.average(vals)
std = np.std(vals)
if(median):
med = np.median(vals)
return ave, std, med
return ave, std
def stats_str(data, percs=[0.0, 0.16, 0.50, 0.84, 1.00], ave=False, std=False, weights=None,
format=None, log=False, label=True, label_log=True, filter=None):
"""Return a string with the statistics of the given array.
Arguments
---------
data : ndarray of scalar
Input data from which to calculate statistics.
percs : array_like of scalars in {0, 100}
Which percentiles to calculate.
ave : bool
Include average value in output.
std : bool
Include standard-deviation in output.
format : str
Formatting for all numerical output, (e.g. `":.2f"`).
log : bool
Convert values to log10 before printing.
label : bool
Add label for which percentiles are being printed
label_log : bool
If `log` is also true, append a string saying these are log values.
Output
------
out : str
Single-line string of the desired statistics.
"""
# data = np.array(data).astype(np.float)
data = np.array(data)
if filter is not None:
data = math_core.comparison_filter(data, filter)
if np.size(data) == 0:
return "empty after filtering"
if log:
data = np.log10(data)
percs = np.atleast_1d(percs)
if np.any(percs > 1.0):
warnings.warn("WARNING: zcode.math.statistic: input `percs` should be [0.0, 1.0], "
"dividing these by 100.0!")
percs /= 100.0
percs_flag = False
if (percs is not None) and len(percs):
percs_flag = True
out = ""
if format is None:
allow_int = False if (ave or std) else True
format = math_core._guess_str_format_from_range(data, allow_int=allow_int)
# If a `format` is given, but missing the colon, add the colon
if len(format) and not format.startswith(':'):
format = ':' + format
form = "{{{}}}".format(format)
# Add average
if ave:
out += "ave = " + form.format(np.average(data))
if std or percs_flag:
out += ", "
# Add standard-deviation
if std:
out += "std = " + form.format(np.std(data))
if percs_flag:
out += ", "
# Add percentiles
if percs_flag:
tiles = quantiles(data, percs, weights=weights).astype(data.dtype)
out += "(" + ", ".join(form.format(tt) for tt in tiles) + ")"
if label:
out += ", for (" + ", ".join("{:.0f}%".format(100*pp) for pp in percs) + ")"
# Note if these are log-values
if log and label_log:
out += " (log values)"
return out
def std(vals, weights=None, **kwargs):
"""
See: https://www.itl.nist.gov/div898/software/dataplot/refman2/ch2/weightsd.pdf
"""
if weights is None:
return np.std(vals, **kwargs)
mm = np.count_nonzero(weights)
ave = mean(vals, weights=weights, **kwargs)
num = np.sum(weights * (vals - ave)**2)
den = np.sum(weights) * (mm - 1) / mm
std = np.sqrt(num/den)
return std
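# Note: with uniform weights this reduces to the *sample* standard deviation
# (ddof=1), whereas the unweighted branch above uses np.std's default ddof=0.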
class LH_Sampler:
"""
Much of this code was taken from the pyDOE project:
- https://github.com/tisimst/pyDOE
This code was originally published by the following individuals for use with
Scilab:
Copyright (C) 2012 - 2013 - <NAME>
Copyright (C) 2012 - <NAME>
Copyright (C) 2010 - 2011 - INRIA - <NAME>
Copyright (C) 2009 - <NAME>
Copyright (C) 2009 - CEA - <NAME>
website: forge.scilab.org/index.php/p/scidoe/sourcetree/master/macros
Much thanks goes to these individuals. It has been converted to Python by
<NAME>.
"""
'''
@classmethod
def oversample(cls, npar, nsamp, oversamp, **kwargs):
if not isinstance(oversamp, int) or oversamp < 1:
raise ValueError(f"`oversamp` argument '{oversamp}' must be an integer!")
samples = None
for ii in range(oversamp):
ss = cls.sample(npar, nsamp=nsamp, **kwargs)
if samples is None:
samples = ss
else:
samples = np.append(samples, ss, axis=-1)
return samples
'''
@classmethod
def sample(cls, vals, nsamp=None, **kwargs):
if isinstance(vals, int):
return cls.sample_unit(vals, nsamp=nsamp, **kwargs)
return cls.sample_vals(vals, nsamp=nsamp, **kwargs)
@classmethod
def sample_vals(cls, vals, nsamp=None, log=False, **kwargs):
vals = np.asarray(vals)
try:
npar, check = np.shape(vals)
if (check != 2) or (npar < 2):
raise ValueError
except ValueError:
print(f"vals = {vals}")
raise ValueError(f"Shape of `vals` ({np.shape(vals)}) must be (N,2)!")
if np.isscalar(log):
log = [log] * npar
if np.any([ll not in [True, False] for ll in log]):
raise ValueError(f"`log` value(s) must be 'True' or 'False'!")
# Draw samples in [0.0, 1.0]
samps = cls.sample_unit(npar, nsamp=nsamp, **kwargs)
# Map samples to the given ranges in log or linear space
for ii, vv in enumerate(vals):
if log[ii]:
vv = np.log10(vv)
# temp = np.copy(samps[ii, :])
# samps[ii, :] *= (vv.max() - vv.min())
# samps[ii, :] += vv.min()
samps[ii, :] = (vv.max() - vv.min()) * samps[ii, :] + vv.min()
if log[ii]:
samps[ii, :] = 10.0 ** samps[ii, :]
vv = 10.0 ** vv
# if np.any((samps[ii] < vv.min()) | (samps[ii] > vv.max())):
# print(f"temp = {temp}")
# print(f"vv = {vv}")
# err = (
# f"Samples ({stats_str(samps[ii])}) exceeded "
# f"values ({math_core.minmax(vv)})"
# )
# raise ValueError(err)
return samps
@classmethod
def sample_unit(cls, npar, nsamp=None, center=False, optimize=None, iterations=10):
if nsamp is None:
nsamp = npar
# Construct optimization variables/functions
optimize = None if (optimize is None) else optimize.lower()
if optimize is not None:
if optimize.startswith('dist'):
extr = 0.0
                mask = np.ones((nsamp, nsamp), dtype=bool)
# -*- coding: utf-8 -*-
###############################################################################
###############################################################################
import logging
import numpy as np
from skimage import io
# create logger
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
###############################################################################
###############################################################################
# global params
fld = 'data/'
satellite_images = ['20090526', '20110514', '20120524', '20130608',
'20140517', '20150507', '20160526']
train_images = satellite_images[:-1]
alt = 'DEM_altitude.tif'
slp = 'DEM_slope.tif'
def load_satellite_img(path, date, normalize=True):
img = io.imread(path + date + ".tif").astype(np.float32)
ndvi = io.imread(path + date + "_NDVI.tif").astype(np.float32)[..., None]
if normalize:
img /= 20000.0
ndvi /= 255.0 # TODO ask paul: too high ?
return img, ndvi
def load_satellite_mask(path: str, date: str):
    return io.imread(path + date + "_mask_ls.tif").astype(bool)
def load_static_data(path: str, normalize: bool = True):
altitude = io.imread(path + alt).astype(np.float32)[..., None]
slope = io.imread(path + slp).astype(np.float32)[..., None]
if normalize:
altitude /= 2555.0
slope /= 52.0
return altitude, slope
def load_image_eval(path):
altitude, slope = load_static_data(path)
img1 = get_single_satellite_features(path, satellite_images[-1])
img2 = get_single_satellite_features(path, satellite_images[-2])
    return np.concatenate((img1, img2, altitude, slope), 2)
import os
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
# from keras_contrib.layers import InstanceNormalization
from tensorflow.keras.layers import Layer, InputSpec
PRETRAINED_WEIGHT_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "pretrained_weights")
# ref: https://stackoverflow.com/a/53349976/2447655
class ReflectionPadding2D(Layer):
def __init__(self, padding=(1, 1), **kwargs):
self.padding = tuple(padding)
self.input_spec = [InputSpec(ndim=4)]
super(ReflectionPadding2D, self).__init__(**kwargs)
def compute_output_shape(self, s):
""" If you are using "channels_last" configuration"""
return s[0], s[1] + 2 * self.padding[0], s[2] + 2 * self.padding[1], s[3]
def call(self, x):
w_pad, h_pad = self.padding
return tf.pad(x, [[0, 0], [h_pad, h_pad], [w_pad, w_pad], [0, 0]], 'REFLECT')
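# Example (a sketch): reflection-pad a (1, 32, 32, 3) tensor by one pixel:
#     x = tf.zeros((1, 32, 32, 3))
#     y = ReflectionPadding2D(padding=(1, 1))(x)  # -> shape (1, 34, 34, 3)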
def conv_layer(style, name, filters, kernel_size, strides=(1, 1), bias=True):
init_weight = np.load(f"{PRETRAINED_WEIGHT_DIR}/{style}/{name}.weight.npy")
init_weight = np.transpose(init_weight, [2, 3, 1, 0])
init_bias = np.load(f"{PRETRAINED_WEIGHT_DIR}/{style}/{name}.bias.npy")
if bias:
bias_initializer = tf.keras.initializers.constant(init_bias)
else:
bias_initializer = "zeros"
layer = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
kernel_initializer=tf.keras.initializers.constant(init_weight),
bias_initializer=bias_initializer
)
return layer
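# Example (a sketch; "starry_night"/"conv1" are hypothetical names -- filters
# and kernel_size must match the shapes of the saved .npy weight files):
#     conv1 = conv_layer("starry_night", "conv1", filters=32, kernel_size=9)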
def instance_norm_layer(style, name, epsilon=1e-9):
init_beta = np.load(f"{PRETRAINED_WEIGHT_DIR}/{style}/{name}.shift.npy")
    init_gamma = np.load(f"{PRETRAINED_WEIGHT_DIR}/{style}/{name}.scale.npy")
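    # NOTE: the source is truncated here; a plausible completion (assumption)
    # builds the normalization layer from the pretrained shift/scale arrays:
    layer = tfa.layers.InstanceNormalization(
        epsilon=epsilon,
        beta_initializer=tf.keras.initializers.constant(init_beta),
        gamma_initializer=tf.keras.initializers.constant(init_gamma),
    )
    return layer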
# Copyright (c) 2022 ETH Zurich, <NAME>
# MIT License
# Load modules
import os
import numpy as np
from shapely.geometry import shape, box
import fiona
from scipy.spatial import cKDTree
import pygeos
import time
from skimage.measure import find_contours
# Load required functions
import functions_cy
###############################################################################
def get_GSHHS_coastlines(dom, path_GSHHG, path_temp):
"""Get relevant GSHHS coastline data.
Get relevant GSHHS coastline data for rectangular latitude/longitude
domain.
Parameters
----------
dom : dict
Specifications of rectangular latitude/longitude domain
('lat_min', 'lat_max', 'lon_min', 'lon_max')
path_GSHHG: str
Path to folder of GSHHS data with shapefile 'GSHHS_f_L1.shp'
path_temp: str
Path to temporary directory in which bounding boxes for coastline
polygons are cached
Returns
-------
poly_coastlines : list
Relevant coastline polygons as Shapely polygons
Notes
-----
Source of GSHHS data: https://www.soest.hawaii.edu/pwessel/gshhg/"""
# Check arguments
keys_req = ("lon_min", "lon_max", "lat_min", "lat_max")
if not set(keys_req).issubset(set(dom.keys())):
raise ValueError("one or multiple key(s) are missing in 'dom'")
if (dom["lon_min"] >= dom["lon_max"])\
or (dom["lat_min"] >= dom["lat_max"]):
raise ValueError("invalid domain extent")
if not os.path.isfile(path_GSHHG + "GSHHS_f_L1.shp"):
raise ValueError("file 'GSHHS_f_L1.shp' not found in provided "
+ "path for GSHHG data")
if not os.path.isdir(path_temp):
raise ValueError("temporary directory does not exist")
t_beg_func = time.time()
# Compute and save bounding boxes of coastlines polygons
file_bbc = path_temp + "Bounding_boxes_coastlines.npy"
if not os.path.isfile(file_bbc):
t_beg = time.time()
ds = fiona.open(path_GSHHG + "GSHHS_f_L1.shp")
bounds = np.empty((len(ds), 4), dtype=np.float32)
for idx, var in enumerate(ds):
bounds[idx, :] = shape(var["geometry"]).bounds
# (lon_min, lat_min, lon_max, lat_max)
ds.close()
np.save(file_bbc, bounds)
print("Bounding boxes for coastline polygons computed "
+ "(%.2f" % (time.time() - t_beg) + " s)")
# Find relevant polygons for domain
bounds = np.load(file_bbc)
geoms = pygeos.box(bounds[:, 0], bounds[:, 1], bounds[:, 2], bounds[:, 3])
tree = pygeos.STRtree(geoms)
quer_rang = [dom["lon_min"], dom["lat_min"],
dom["lon_max"], dom["lat_max"]]
ind = tree.query(pygeos.box(*quer_rang))
# Load relevant polygons
ds = fiona.open(path_GSHHG + "GSHHS_f_L1.shp")
poly_all = [shape(ds[int(i)]["geometry"]) for i in ind]
ds.close()
print("Number of polygons: " + str(len(poly_all)))
# Crop polygons (if necessary)
quer_rang_s = box(*quer_rang)
poly_coastlines = []
for i in poly_all:
if quer_rang_s.contains(i):
poly_coastlines.append(i)
elif quer_rang_s.intersects(i):
poly_coastlines.append(quer_rang_s.intersection(i))
print("Run time: %.2f" % (time.time() - t_beg_func) + " s")
return poly_coastlines
###############################################################################
def coastline_contours(lon, lat, mask_bin):
"""Compute coastline contours.
Compute coastline contours from binary land-sea mask.
Parameters
----------
lon : ndarray of double
Array (1-dimensional) with geographic longitude [degree]
lat: ndarray of double
Array (1-dimensional) with geographic latitude [degree]
mask_bin: str
Array (2-dimensional) with binary land-sea mask (0: water, 1: land)
Returns
-------
contours_latlon : list
List with contour lines in latitude/longitude coordinates [degree]"""
# Check arguments
if (lat.ndim != 1) or (lon.ndim != 1):
raise ValueError("Input coordinates arrays must be 1-dimensional")
if (mask_bin.shape[0] != len(lat)) or (mask_bin.shape[1] != len(lon)):
raise ValueError("Input data has inconsistent dimension length(s)")
if (mask_bin.dtype != "uint8") or (len(np.unique(mask_bin)) != 2) \
            or (not np.all(np.unique(mask_bin) == np.array([0, 1]))):
        raise ValueError("'mask_bin' must be binary with values 0 and 1")
import unittest
import numpy as np
from linearclassifier import LinearClassifier
class LinearClassifierTests(unittest.TestCase):
def test_3nodes_nobias_1record_1iterations_assert_weight_increases(self):
x_train = np.array([[0.1, 0.1, 0.1,]])
y_train = np.array([[1]])
weights = [np.array([[0.1, -0.9, 0.9]])]
weights_copy = weights[0].copy()
# x_train * weights = [0.01, -0.09, 0.9]
# To get to 1, all weights should increase
net = LinearClassifier(layers_weights=weights, with_bias=False)
_, _, cost_derivative = net.run_iteration(x_train, y_train)
# this makes weight value increase
assert np.all(cost_derivative[0] < 0)
# assert that all weights values increased
assert np.all(net.layers_weights[0] - weights_copy > 0)
def test_1node_nobias_1record_2iterations_oppositeweights(self):
x_train = np.array([[0.1]])
y_train = np.array([[1]])
weights = [np.array([[0.1]])]
# x_train * weights = 0.01.
# To get to 1, the weight should increase
net = LinearClassifier(layers_weights=weights, with_bias=False)
first_cost, _, _ = net.run_iteration(x_train, y_train, compute_cost=True)
snd_cost, _, _ = net.run_iteration(x_train, y_train, compute_cost=True)
# cost should decrease
assert first_cost - snd_cost > 0
def test_1nodes_nobias_1record_1iterations_assert_weight_decreases(self):
x_train = np.array([[0.9]])
y_train = np.array([[0]])
weights = [np.array([[0.9]])]
weights_copy = weights[0].copy()
net = LinearClassifier(layers_weights=weights, with_bias=False)
_, _, cost_derivative = net.run_iteration(x_train, y_train)
# this makes weight value decrease
assert np.all(cost_derivative[0] > 0)
# assert that weight value decreased
assert np.all(net.layers_weights[0] - weights_copy < 0)
def test_noRegularization_highweights(self):
# Having high weights means that the network is overfitting the training data
# and cannot (has no power) generalize well anymore (for unseen data)
x_train = np.array([[0.5]])
y_train = np.array([[1]])
weights = [np.array([[0.7]])]
net = LinearClassifier(layers_weights=weights, regularization_value=0, with_bias=False)
for _ in range(1000):
net.run_iteration(x_train, y_train)
assert np.all(net.layers_weights[0] > 5)
def test_regularization_controlledweights(self):
# same test as above, but with regularization value, that limits (does not allow)
# for the weight to have a high value
        x_train = np.array([[0.5]])
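        # NOTE: the source is truncated here; inferred sketch mirroring the
        # unregularized test above (the regularization value and the bound
        # below are assumptions):
        y_train = np.array([[1]])
        weights = [np.array([[0.7]])]
        net = LinearClassifier(layers_weights=weights, regularization_value=0.5, with_bias=False)
        for _ in range(1000):
            net.run_iteration(x_train, y_train)
        # regularization should keep the weight from blowing up
        assert np.all(net.layers_weights[0] < 5)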
import numpy as np
import pandas as pd
from simanneal import Annealer
from ..logger import create_null_logger
class EqualWeightModelSelector:
def __init__(self, execution_cost: float,
assets: float, logger=None):
self._execution_cost = execution_cost
self._assets = assets
self._logger = create_null_logger() if logger is None else logger
def select_model(self, params):
df_ret = params.df_ret
df_position = params.df_position
df_ret = df_ret * df_position
        # subtract execution cost incurred by position changes
df_ret -= df_position.diff(1).fillna(0).abs() * self._execution_cost
# aggregate symbol
df_ret = df_ret.groupby(level='model_id', axis=1).sum()
self._logger.debug('EqualWeightModelSelector.select_model df_ret statistics')
for model_id in df_ret.columns:
self._logger.debug('{} mean {} std {} sharpe {}'.format(
model_id,
df_ret[model_id].mean(),
df_ret[model_id].std(),
df_ret[model_id].mean() / (1e-37 + df_ret[model_id].std()),
))
        # optimization
problem = Problem(
            np.zeros(df_ret.shape[1], dtype=bool),
ret_numpy=df_ret.values,
price_numpy=params.df_current.loc[df_ret.columns, 'price'].values,
assets=self._assets,
budget=int(params.budget),
random_state=params.random_state,
)
        # TODO: anneal depends on random.random
x, energy = problem.anneal()
if energy > 0:
            x = np.zeros(df_ret.shape[1], dtype=bool)
return pd.DataFrame(
np.ones((np.sum(x), 1)) / (1e-37 + np.sum(x)),
index=df_ret.columns[x], columns=['weight']
)
class Problem(Annealer):
def __init__(self, state, ret_numpy=None, price_numpy=None, assets=None, budget=None, random_state=None):
super().__init__(state, disable_signal=True)
self._ret_numpy = ret_numpy
self._price_numpy = price_numpy
self._assets = assets
self._budget = budget
self._rs = np.random.RandomState(random_state)
def move(self):
x = self.state
rs = self._rs
sum_x = np.sum(x * 1.0)
if rs.randint(2) == 0:
if sum_x > 0:
                self.state[rs.choice(np.arange(x.size)[x])] = False
# -*- coding: utf-8 -*-
""" :class:`MAB`, :class:`MarkovianMAB`, :class:`ChangingAtEachRepMAB`, :class:`IncreasingMAB`, :class:`PieceWiseStationaryMAB` and :class:`NonStationaryMAB` classes to wrap the arms of some Multi-Armed Bandit problems.
Such class has to have *at least* these methods:
- ``draw(armId, t)`` to draw *one* sample from that ``armId`` at time ``t``,
- and ``reprarms()`` to pretty print the arms (for titles of a plot),
- and more, see below.
.. warning:: FIXME it is still a work in progress, I need to add continuously varying environments. See https://github.com/SMPyBandits/SMPyBandits/issues/71
"""
from __future__ import division, print_function # Python 2 compatibility
__author__ = "<NAME>"
__version__ = "0.9"
import numpy as np
import matplotlib.pyplot as plt
try:
from .pykov import Chain
except ImportError as e:
try:
from pykov import Chain
except ImportError:
print("Warning: 'pykov' module seems to not be available. But it is shipped with SMPyBandits. Weird.")
print("Dou you want to try to install it from https://github.com/riccardoscalco/Pykov ?")
print("Warning: the 'MarkovianMAB' class will not work...")
# Local imports
try:
from .plotsettings import signature, wraptext, wraplatex, palette, makemarkers, legend, show_and_save
except ImportError:
from plotsettings import signature, wraptext, wraplatex, palette, makemarkers, legend, show_and_save
class MAB(object):
""" Basic Multi-Armed Bandit problem, for stochastic and i.i.d. arms.
- configuration can be a dict with 'arm_type' and 'params' keys. 'arm_type' is a class from the Arms module, and 'params' is a dict, used as a list/tuple/iterable of named parameters given to 'arm_type'. Example::
configuration = {
'arm_type': Bernoulli,
'params': [0.1, 0.5, 0.9]
}
configuration = { # for fixed variance Gaussian
'arm_type': Gaussian,
'params': [0.1, 0.5, 0.9]
}
- But it can also accept a list of already created arms::
configuration = [
Bernoulli(0.1),
Bernoulli(0.5),
Bernoulli(0.9),
]
- Both will create three Bernoulli arms, of parameters (means) 0.1, 0.5 and 0.9.
"""
def __init__(self, configuration):
"""New MAB."""
print("\n\nCreating a new MAB problem ...") # DEBUG
self.isChangingAtEachRepetition = False #: Flag to know if the problem is changing at each repetition or not.
self.isDynamic = False #: Flag to know if the problem is static or not.
self.isMarkovian = False #: Flag to know if the problem is Markovian or not.
self.arms = [] #: List of arms
self._sparsity = None
if isinstance(configuration, dict):
print(" Reading arms of this MAB problem from a dictionnary 'configuration' = {} ...".format(configuration)) # DEBUG
arm_type = configuration["arm_type"]
print(" - with 'arm_type' =", arm_type) # DEBUG
params = configuration["params"]
print(" - with 'params' =", params) # DEBUG
            # Each 'param' could be one value (eg. 'mean' = probability for a Bernoulli) or a tuple (eg. '(mu, sigma)' for a Gaussian) or a dictionary
for param in params:
self.arms.append(arm_type(*param) if isinstance(param, (dict, tuple, list)) else arm_type(param))
# XXX try to read sparsity
self._sparsity = configuration["sparsity"] if "sparsity" in configuration else None
else:
print(" Taking arms of this MAB problem from a list of arms 'configuration' = {} ...".format(configuration)) # DEBUG
for arm in configuration:
self.arms.append(arm)
# Compute the means and stats
print(" - with 'arms' =", self.arms) # DEBUG
self.means = np.array([arm.mean for arm in self.arms]) #: Means of arms
print(" - with 'means' =", self.means) # DEBUG
self.nbArms = len(self.arms) #: Number of arms
print(" - with 'nbArms' =", self.nbArms) # DEBUG
if self._sparsity is not None:
print(" - with 'sparsity' =", self._sparsity) # DEBUG
        self.maxArm = np.max(self.means)
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
def modaddplot(df):
preproc = pd.DataFrame(
{
"Bits": list(df[df.method == "ModAddBig"].bits),
"Go's big.Int": list(df[df.method == "ModAddBig"].ns / 1000),
"saferith": list(df[df.method == "ModAddNat"].ns / 1000),
}
)
plt.clf()
ax = plt.subplot(1, 1, 1)
preproc.plot(x="Bits", y="Go's big.Int", ax=ax, legend=False, color="r", ylabel="μs")
preproc.plot(x="Bits", y="saferith", ax=ax, legend=False, xlabel="", ylabel="μs")
plt.xlabel('Significant Bits')
plt.xticks(np.arange(0, 4 * 1024 + 1, 1024))
ax.figure.legend(bbox_to_anchor=(1.0, 1.06), loc="upper right")
plt.title("Execution time of Modular Addition with 2048 bit modulus", loc="left")
fig = plt.gcf()
fig.set_size_inches(8.1, 4)
plt.tight_layout()
plt.savefig("./.out/modadd.png", bbox_inches="tight")
def expplot(df):
preproc = pd.DataFrame(
{
"Hamming Weight": list(df[df.method == "ModExpBig"].bits),
"Go's big.Int": list(df[df.method == "ModExpBig"].ns / 1000),
"saferith": list(df[df.method == "ModExpNat"].ns / 1000),
}
)
plt.clf()
ax = plt.subplot(1, 1, 1)
preproc.plot(
x="Hamming Weight", y="Go's big.Int", ax=ax, legend=False, color="r", ylabel="μs",
logy=False
)
preproc.plot(
x="Hamming Weight", y="saferith", ax=ax, legend=False, xlabel="", ylabel="μs",
logy=False
)
plt.xlabel('Number of 1 bits in exponent')
    plt.xticks(np.arange(0, 64 + 1, 16))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Given point clouds S and T with the same number of points and a known
point-to-point correspondence, estimate the translation, rotation and
scaling between the two point clouds.
"""
from typing import Tuple
from dataclasses import dataclass
import numpy as np
__all__ = ["MatchingResult", "minL2"]
@dataclass
class MatchingResult:
cost: float
offsetX: float
offsetY: float
angle: float
scale: float
movingCenterX: float
movingCenterY: float
def minL2(S: np.ndarray, T: np.ndarray) -> MatchingResult:
r"""Find (s, R, t) \in Sim(2) which minimizes sum_i || sRS_i + t - T_i ||^2.
Parameters
==========
S: (N, 2) array_like
Moving pointcloud.
T: (N, 2) array_like
Reference pointcloud.
Returns
=======
result: MatchingResult
"""
Smean = np.mean(S, axis=0)
Tmean = np.mean(T, axis=0)
S_ = S - Smean
T_ = T - Tmean
S_F2 = (S_ ** 2).sum()
T_F2 = (T_ ** 2).sum()
offset = Tmean - Smean
    # np.linalg.svd returns Vt (V transposed); the Kabsch/Procrustes optimum
    # is rot = Vt.T @ U.T (a det(rot) reflection check is omitted here)
    U, s, Vt = np.linalg.svd(S_.T @ T_)
    rot = Vt.T @ U.T
    angle = np.arctan2(rot[1, 0], rot[0, 0])
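    # Hedged worked check of the SVD step above (the rest of the function body
    # is truncated in the source): for T_i = s * R(theta) @ S_i + t, the
    # centered cross-covariance S_.T @ T_ factors as U @ diag(sv) @ Vt with
    # Vt.T @ U.T == R(theta), so `angle` recovers theta up to noise.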
import os
import torch
import logging
import numpy as np
from convlab2.util.train_util import to_device
import torch.nn as nn
from torch import optim
from convlab2.policy.mle.idea3.idea_3_max_margin import Reward_max_margin
import matplotlib.pyplot as plt
import pickle
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from convlab2.policy.mle.idea4.autoencoder import auto_encoder
class Reward_predict(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(Reward_predict, self).__init__()
self.encoder_1 = nn.LSTM(input_size, output_size, batch_first=True, bidirectional=False)
self.encoder_2 = nn.LSTM(output_size, output_size)
self.m = nn.Sigmoid()
        self.loss = nn.BCELoss(reduction='sum')
self.cnn_belief = nn.Linear(input_size - output_size, output_size)
self.cnn_output = nn.Linear(output_size, output_size)
def forward(self, input_feature, input_belief, target):
        # construct the batch first, then compute the loss over it
_, (last_hidden, last_cell) = self.encoder_1(input_feature)
# second Part
_, (predict_action, last_cell) = self.encoder_2(self.cnn_belief(input_belief), (last_hidden, last_cell))
loss = self.loss(self.m(self.cnn_output(predict_action)), target)
return loss
class MLE_Trainer_Abstract():
def __init__(self, manager, cfg):
self._init_data(manager, cfg)
self.policy = None
self.policy_optim = None
# this is for fake data generator
self.generator_fake = None
# define the stuff from the reward machine
def _init_data(self, manager, cfg):
self.data_train = manager.create_dataset('train', cfg['batchsz'])
self.data_valid = manager.create_dataset('val', cfg['batchsz'])
self.data_test = manager.create_dataset('test', cfg['batchsz'])
self.save_dir = cfg['save_dir']
self.print_per_batch = cfg['print_per_batch']
self.save_per_epoch = cfg['save_per_epoch']
self.multi_entropy_loss = nn.MultiLabelSoftMarginLoss()
self.loss_record = []
# stuff for idea 2
self.reward_predictor = Reward_predict(549, 457, 209)
self.reward_optim = optim.Adam(self.reward_predictor.parameters(), lr=1e-4)
# stuff for idea 3
self.reward_predictor_idea3 = Reward_max_margin(549, 209)
self.reward_optim_idea3 = optim.Adam(self.reward_predictor_idea3.parameters(), lr=1e-4)
# init the terminate state and use if when training our model.
self.terminate_train = {}
self.state_whole = {}
self.success = []
self.success_plot = []
# load data of terminate
for part in ['train', 'val', 'test']:
with open(os.path.join("//home//raliegh//图片//ConvLab-2//convlab2//policy//mle//multiwoz//processed_data",
'{}_terminate.pkl'.format(part)), 'rb') as f:
self.terminate_train[part] = pickle.load(f)
# load data of state_whole
for part in ['train', 'val', 'test']:
with open(os.path.join("//home//raliegh//图片//ConvLab-2//convlab2//policy//mle//multiwoz//processed_data",
'{}_state_whole.pkl'.format(part)), 'rb') as f:
self.state_whole[part] = pickle.load(f)
def policy_loop(self, data):
# this is from states to predict the a, pretty similar to idea2
s, target_a = to_device(data)
a_weights = self.policy(s)
loss_a = self.multi_entropy_loss(a_weights, target_a)
return loss_a
def reward_training(self, epoch):
self.reward_predictor.train()
s_temp = torch.tensor([])
a_temp = torch.tensor([])
loss = torch.tensor([0]).float()
for i, data in enumerate(self.data_train):
# curr_state is everything, contains domain, action, bf, and also user action.
curr_state = self.state_whole["train"][i]
fake_action = self.generator_fake.predict(curr_state)
s, a = to_device(data)
# s_temp = s[:i+1]
try:
s_temp = torch.cat((s_temp, s), 0)
a_temp = torch.cat((a_temp, a), 0)
except Exception as e:
s_temp = s
a_temp = a
s_train = s_temp.unsqueeze(0)
a_train = a_temp.unsqueeze(0)
# print(s_train.shape)
s_train_np = np.array(s_train)
if len(s_train[0]) >= 2:
# print("-"*300)
input_pre = torch.cat((s_train, a_train), 2)[0][:-1].unsqueeze(0)
input_bf = s_train[0][-1].unsqueeze(0).unsqueeze(0)
target = a_train[0][-1].unsqueeze(0).unsqueeze(0)
# print(input_pre.shape,input_bf.shape,target.shape)
terminate = self.terminate_train["train"][i]
if terminate == False:
micro_loss = self.reward_predictor(input_pre, input_bf, target)
loss += micro_loss
else:
# predict the last one and then loss backward and then clear the button
micro_loss = self.reward_predictor(input_pre, input_bf, target)
loss += micro_loss
# print(loss,loss/(len(s_temp)-3))
if loss != torch.tensor([0]).float():
# print(loss.item()/(len(iteration)-3),loss.shape)
loss.backward()
for name, param in self.reward_predictor.named_parameters():
if "cnn" not in name:
# print(name)
# print(param.grad)
pass
self.reward_optim.step()
self.reward_optim.zero_grad()
self.loss_record.append(loss.item())
# clear the button
s_temp = torch.tensor([])
a_temp = torch.tensor([])
loss = torch.tensor([0]).float()
# remember to save the model
if (epoch + 1) % self.save_per_epoch == 0:
self.reward_model_save(self.save_dir, epoch)
axis = [i for i in range(len(self.loss_record))]
plt.plot(axis, self.loss_record)
plt.xlabel('Number of turns')
plt.ylabel('Embedding Loss')
plt.show()
def reward_training_idea_3(self, epoch):
self.reward_predictor_idea3.train()
s_temp = torch.tensor([])
a_temp = torch.tensor([])
loss = torch.tensor([0]).float()
for i, data in enumerate(self.data_train):
# curr_state is everything, contains domain, action, bf, and also user action.
curr_state = self.state_whole["train"][i]
fake_action = self.generator_fake.predict(curr_state)
s, a = to_device(data)
# s_temp = s[:i+1]
try:
s_temp = torch.cat((s_temp, s), 0)
a_temp = torch.cat((a_temp, a), 0)
except Exception as e:
s_temp = s
a_temp = a
# [ , , ]
s_train = s_temp.unsqueeze(0)
a_train = a_temp.unsqueeze(0)
# print(s_train.shape)
if len(s_train[0]) >= 2:
# print("-"*300)
input_real = torch.cat((s_train, a_train), 2)[0][:-1].unsqueeze(0)
# construct the data from fake
#
a_train_pre = a_train[0][:-1]
fake_a = fake_action.unsqueeze(0).float()
a_train_fake = torch.cat((a_train_pre,fake_a),0)
input_fake = torch.cat((s_train,a_train_fake.unsqueeze(0)),2)
# constrcut stuff for advantage LSTM
s_train_pre = s_train[0][:-1]
# [ , , ]
input_pre = torch.cat((s_train_pre,a_train_pre),1).unsqueeze(0)
s_last = s_train[0][-1]
a_last = a_train[0][-1]
input_last_real = torch.cat((s_last,a_last)).unsqueeze(0).unsqueeze(0)
input_last_fake = torch.cat((s_last.unsqueeze(0),fake_a),1).unsqueeze(0)
# print(input_pre.shape,input_bf.shape,target.shape)
terminate = self.terminate_train["train"][i]
if terminate == False:
"""
micro_loss, res_1, res_2 = self.reward_predictor_idea3.loss(input_real,input_fake,a_temp[-1].unsqueeze(0),fake_a)
self.success.append(res_1)
self.success.append(res_2)
if len(self.success) == 100:
curr_res = np.sum(self.success)/100
print("fail: ", curr_res)
self.success_plot.append(curr_res)
self.success = []
"""
# """
# method 2
micro_loss, res = self.reward_predictor_idea3.loss_plus_lstm(input_real,input_fake)
self.success.append(res)
if len(self.success) == 100:
curr_res = np.sum(self.success)/100
print("fail: ", curr_res)
self.success_plot.append(curr_res)
self.success = []
# """
loss += micro_loss
else:
# predict the last one and then loss backward and then clear the button
"""
micro_loss, res_1, res_2 = self.reward_predictor_idea3.loss(input_real,input_fake,a_temp[-1].unsqueeze(0),fake_a)
self.success.append(res_1)
self.success.append(res_2)
if len(self.success) == 100:
curr_res = np.sum(self.success)/100
print("fail: ", curr_res)
self.success_plot.append(curr_res)
self.success = []
"""
# """
# method 2
micro_loss, res = self.reward_predictor_idea3.loss_plus_lstm(input_real,input_fake)
self.success.append(res)
if len(self.success) == 100:
curr_res = np.sum(self.success)/100
print("fail: ", curr_res)
self.success_plot.append(curr_res)
self.success = []
# """
loss += micro_loss
len_dia = len(s_temp)
# print(loss.item()/len_dia)
if loss != torch.tensor([0]).float():
# print(loss, loss.dtype)
loss.backward()
# to check if still have gradients
# for name, param in self.reward_predictor_idea3.named_parameters():
# if "cnn" not in name:
# print(name)
# print(param.grad)
self.reward_optim_idea3.step()
self.reward_optim_idea3.zero_grad()
self.loss_record.append(loss.item()/len_dia)
# clear the button
s_temp = torch.tensor([])
a_temp = torch.tensor([])
loss = torch.tensor([0]).float()
# remember to save the model
if (epoch + 1) % self.save_per_epoch == 0:
self.reward_model_save_idea3(self.save_dir, epoch)
print("total fail rate",np.sum(self.success)/len(self.success))
print(self.success)
print(self.success_plot)
plot_stuff = self.success_plot
# plot
axis = [i for i in range(len(plot_stuff))]
plt.plot(axis, plot_stuff)
plt.xlabel('Number of dialogues')
plt.ylabel('Embedding Loss')
plt.show()
def auto_encoder_training(self,epoch):
s_temp = torch.tensor([])
a_temp = torch.tensor([])
data_list = []
data_tensor = torch.tensor([])
for i, data in enumerate(self.data_train):
s, a = to_device(data)
try:
s_temp = torch.cat((s_temp, s), 0)
a_temp = torch.cat((a_temp, a), 0)
except Exception as e:
s_temp = s
a_temp = a
# [ , , ]
s_train = s_temp.unsqueeze(0)
a_train = a_temp.unsqueeze(0)
# print(s_train.shape)
if len(s_train[0]) >= 2:
input_real = torch.cat((s_train, a_train), 2)
terminate = self.terminate_train["train"][i]
if terminate == False:
"""
                For simplicity, non-terminal turns are skipped here; the
                autoencoder is trained on complete dialogues only.
"""
pass
else:
# predict the last one and then loss backward and then clear the button
"""
predict, compute loss, and went forward.
"""
# """
data_list.append(input_real)
pass
# """
# clear the button
s_temp = torch.tensor([])
a_temp = torch.tensor([])
input_real = torch.tensor([])
print("finish creating dataset for auto-encoder")
print("start training auto-encoder")
auto_encoder(data_list)
if (epoch + 1) % self.save_per_epoch == 0:
self.reward_model_save_idea3(self.save_dir, epoch)
print("total fail rate", | np.sum(self.success) | numpy.sum |
""" Core ingredients for RL algorithms.
Author: <NAME> (<EMAIL>)
based on: Spinning Up's Vanilla Policy Gradient
https://github.com/openai/spinningup/blob/master/spinup/algos/pytorch/vpg/core.py
"""
import numpy as np
import scipy.signal
from gym.spaces import Box, Discrete
import abc
import torch
import torch.nn as nn
import torch.optim as optim
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
from rl_safety_algorithms.common.online_mean_std import OnlineMeanStd
from rl_safety_algorithms.algs.vtrace import calculate_v_trace
import rl_safety_algorithms.common.mpi_tools as mpi_tools
registered_actors = dict() # global dict that holds pointers to functions
def get_optimizer(opt: str, module: torch.nn.Module, lr: float):
""" Returns an initialized optimizer from PyTorch."""
assert hasattr(optim, opt), f'Optimizer={opt} not found in torch.'
optimizer = getattr(optim, opt)
return optimizer(module.parameters(), lr=lr)
def initialize_layer(
init_function: str,
layer: torch.nn.Module
):
if init_function == 'kaiming_uniform': # this the default!
nn.init.kaiming_uniform_(layer.weight, a=np.sqrt(5))
elif init_function == 'xavier_normal':
nn.init.xavier_normal_(layer.weight)
# glorot is also known as xavier uniform
elif init_function == 'glorot' or init_function == 'xavier_uniform':
nn.init.xavier_uniform_(layer.weight)
elif init_function == 'orthogonal': # matches values from baselines repo.
nn.init.orthogonal_(layer.weight, gain=np.sqrt(2))
else:
raise NotImplementedError
# print(layer)
# print(layer.weight)
def register_actor(actor_name):
""" register actor into global dict"""
def wrapper(func):
registered_actors[actor_name] = func
return func
return wrapper
def get_registered_actor_fn(actor_type: str, distribution_type: str):
assert distribution_type == 'categorical' or distribution_type == 'gaussian'
actor_fn = actor_type + '_' + distribution_type
msg = f'Did not find: {actor_fn} in registered actors.'
assert actor_fn in registered_actors, msg
return registered_actors[actor_fn]
def combined_shape(length: int, shape=None):
if shape is None:
return (length,)
return (length, shape) if np.isscalar(shape) else (length, *shape)
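# Example (sketch): combined_shape(10) -> (10,), combined_shape(10, 4) -> (10, 4),
# and combined_shape(10, (3, 2)) -> (10, 3, 2); handy for allocating buffers.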
def convert_str_to_torch_functional(activation):
if isinstance(activation, str): # convert string to torch functional
activations = {
'identity': nn.Identity,
'relu': nn.ReLU,
'sigmoid': nn.Sigmoid,
'softplus': nn.Softplus,
'tanh': nn.Tanh
}
assert activation in activations
activation = activations[activation]
assert issubclass(activation, torch.nn.Module)
return activation
def build_mlp_network(
sizes,
activation,
output_activation='identity',
weight_initialization='kaiming_uniform'
):
activation = convert_str_to_torch_functional(activation)
output_activation = convert_str_to_torch_functional(output_activation)
layers = list()
for j in range(len(sizes) - 1):
act = activation if j < len(sizes) - 2 else output_activation
affine_layer = nn.Linear(sizes[j], sizes[j + 1])
initialize_layer(weight_initialization, affine_layer)
layers += [affine_layer, act()]
return nn.Sequential(*layers)
def count_vars(module):
return sum([np.prod(p.shape) for p in module.parameters()])
def discount_cumsum(x, discount):
"""
magic from rllab for computing discounted cumulative sums of vectors.
input:
vector x,
[x0,
x1,
x2]
output:
[x0 + discount * x1 + discount^2 * x2,
x1 + discount * x2,
x2]
"""
    return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
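# Worked example (sketch): for x = [1., 1., 1.] and discount = 0.5,
# discount_cumsum(x, 0.5) -> [1.75, 1.5, 1.], i.e.
# [x0 + 0.5*x1 + 0.25*x2, x1 + 0.5*x2, x2], matching the docstring above.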
# ====================================
# Algorithm Classes
# ====================================
class Algorithm(abc.ABC):
@abc.abstractmethod
def learn(self) -> tuple:
pass
@abc.abstractmethod
def log(self, epoch: int):
pass
@abc.abstractmethod
def update(self):
pass
class PolicyGradientAlgorithm(Algorithm, abc.ABC):
@abc.abstractmethod
def roll_out(self):
"""collect data and store to experience buffer."""
pass
class ConstrainedPolicyGradientAlgorithm(abc.ABC):
""" Abstract base class for Lagrangian-TRPO and Lagrangian-PPO."""
def __init__(self,
cost_limit: float,
use_lagrangian_penalty: bool,
lagrangian_multiplier_init: float,
lambda_lr: float,
lambda_optimizer: str
):
self.cost_limit = cost_limit
self.lambda_lr = lambda_lr
self.use_lagrangian_penalty = use_lagrangian_penalty
init_value = max(lagrangian_multiplier_init, 1e-5)
self.lagrangian_multiplier = torch.nn.Parameter(
torch.as_tensor(init_value),
requires_grad=True)
self.lambda_range_projection = torch.nn.ReLU()
# fetch optimizer from PyTorch optimizer package
assert hasattr(optim, lambda_optimizer), \
f'Optimizer={lambda_optimizer} not found in torch.'
torch_opt = getattr(optim, lambda_optimizer)
self.lambda_optimizer = torch_opt([self.lagrangian_multiplier, ],
lr=lambda_lr)
def compute_lambda_loss(self, mean_ep_cost):
"""Penalty loss for Lagrange multiplier."""
return -self.lagrangian_multiplier * (mean_ep_cost - self.cost_limit)
def update_lagrange_multiplier(self, ep_costs):
""" Update Lagrange multiplier (lambda)
Note: ep_costs obtained from: self.logger.get_stats('EpCosts')[0]
are already averaged across MPI processes.
"""
self.lambda_optimizer.zero_grad()
lambda_loss = self.compute_lambda_loss(ep_costs)
lambda_loss.backward()
self.lambda_optimizer.step()
self.lagrangian_multiplier.data.clamp_(0) # enforce: lambda in [0, inf]
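        # Numeric sketch: with cost_limit=25 and ep_costs=30 the loss is
        # -5 * lambda, so its gradient w.r.t. lambda is -5 and one descent
        # step *increases* lambda by 5 * lambda_lr -- the penalty keeps
        # growing for as long as mean episode costs exceed the limit.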
# ====================================
# Actor Modules
# ====================================
class Actor(nn.Module):
def __init__(self, obs_dim, act_dim, weight_initialization, shared=None):
super(Actor, self).__init__()
self.obs_dim = obs_dim
self.act_dim = act_dim
self.shared = shared
self.weight_initialization = weight_initialization
def dist(self, obs) -> torch.distributions.Distribution:
raise NotImplementedError
def log_prob_from_dist(self, pi, act) -> torch.Tensor:
raise NotImplementedError
def forward(self, obs, act=None) -> tuple:
# Produce action distributions for given observations, and
# optionally compute the log likelihood of given actions under
# those distributions.
pi = self.dist(obs)
logp_a = None
if act is not None:
logp_a = self.log_prob_from_dist(pi, act)
return pi, logp_a
def sample(self, obs) -> tuple:
raise NotImplementedError
def predict(self, obs) -> tuple:
""" Predict action based on observation without exploration noise.
Use this method for evaluation purposes. """
return self.sample(obs)
@register_actor("mlp_categorical")
class MLPCategoricalActor(Actor):
def __init__(self, obs_dim, act_dim, hidden_sizes, activation,
weight_initialization, shared=None):
super().__init__(obs_dim, act_dim, weight_initialization, shared=shared)
if shared is not None:
raise NotImplementedError
self.net = build_mlp_network(
[obs_dim] + list(hidden_sizes) + [act_dim],
activation=activation,
weight_initialization=weight_initialization
)
def dist(self, obs) -> torch.distributions.Distribution:
logits = self.net(obs)
return Categorical(logits=logits)
def log_prob_from_dist(self, pi, act) -> torch.Tensor:
return pi.log_prob(act)
def sample(self, obs) -> tuple:
# frac is necessary for epsilon greedy
# eps_threshold = np.max([self.current_eps, self.min_eps])
dist = self.dist(obs)
a = dist.sample()
logp_a = self.log_prob_from_dist(dist, a)
return a, logp_a
@register_actor("mlp_gaussian")
class MLPGaussianActor(Actor):
def __init__(
self,
obs_dim,
act_dim,
hidden_sizes,
activation,
weight_initialization,
shared=None):
super().__init__(obs_dim, act_dim, weight_initialization)
log_std = np.log(0.5) * np.ones(self.act_dim, dtype=np.float32)
self.log_std = torch.nn.Parameter(torch.as_tensor(log_std),
requires_grad=False)
if shared is not None: # use shared layers
action_head = nn.Linear(hidden_sizes[-1], act_dim)
self.net = nn.Sequential(shared, action_head, nn.Identity())
else:
layers = [self.obs_dim] + list(hidden_sizes) + [self.act_dim]
self.net = build_mlp_network(
layers,
activation=activation,
weight_initialization=weight_initialization
)
def dist(self, obs):
mu = self.net(obs)
return Normal(mu, self.std)
def log_prob_from_dist(self, pi, act) -> torch.Tensor:
# Last axis sum needed for Torch Normal distribution
return pi.log_prob(act).sum(axis=-1)
def sample(self, obs):
pi = self.dist(obs)
a = pi.sample()
logp_a = self.log_prob_from_dist(pi, a)
return a, logp_a
def set_log_std(self, frac):
""" To support annealing exploration noise.
frac is annealing from 1. to 0 over course of training"""
assert 0 <= frac <= 1
new_stddev = 0.499 * frac + 0.01 # annealing from 0.5 to 0.01
# new_stddev = 0.3 * frac + 0.2 # linearly anneal stddev from 0.5 to 0.2
log_std = np.log(new_stddev) * np.ones(self.act_dim, dtype=np.float32)
self.log_std = torch.nn.Parameter(torch.as_tensor(log_std),
requires_grad=False)
@property
def std(self):
""" Standard deviation of distribution."""
return torch.exp(self.log_std)
def predict(self, obs):
""" Predict action based on observation without exploration noise.
Use this method for evaluation purposes. """
action = self.net(obs)
log_p = torch.ones_like(action) # avoid type conflicts at evaluation
return action, log_p
# ====================================
# Critic Modules
# ====================================
class MLPCritic(nn.Module):
def __init__(self, obs_dim, hidden_sizes, activation, shared=None):
super().__init__()
if shared is None:
self.net = build_mlp_network([obs_dim] + list(hidden_sizes) + [1],
activation=activation)
else: # use shared layers
value_head = nn.Linear(hidden_sizes[-1], 1)
self.net = nn.Sequential(shared, value_head, nn.Identity())
def forward(self, obs):
        return torch.squeeze(self.net(obs), -1)  # Critical to ensure v has right shape.
class ActorCritic(nn.Module):
def __init__(self,
actor_type,
observation_space,
action_space,
use_standardized_obs,
use_scaled_rewards,
use_shared_weights,
ac_kwargs,
weight_initialization='kaiming_uniform'
):
super().__init__()
self.obs_shape = observation_space.shape
self.obs_oms = OnlineMeanStd(shape=self.obs_shape) \
if use_standardized_obs else None
self.ac_kwargs = ac_kwargs
# policy builder depends on action space
if isinstance(action_space, Box):
distribution_type = 'gaussian'
act_dim = action_space.shape[0]
elif isinstance(action_space, Discrete):
distribution_type = 'categorical'
act_dim = action_space.n
else:
raise ValueError
obs_dim = observation_space.shape[0]
layer_units = [obs_dim] + list(ac_kwargs['pi']['hidden_sizes'])
act = ac_kwargs['pi']['activation']
if use_shared_weights:
shared = build_mlp_network(
layer_units,
activation=act,
weight_initialization=weight_initialization,
output_activation=act
)
else:
shared = None
actor_fn = get_registered_actor_fn(actor_type, distribution_type)
self.pi = actor_fn(obs_dim=obs_dim,
act_dim=act_dim,
shared=shared,
weight_initialization=weight_initialization,
**ac_kwargs['pi'])
self.v = MLPCritic(obs_dim,
shared=shared,
**ac_kwargs['val'])
self.ret_oms = OnlineMeanStd(shape=(1,)) if use_scaled_rewards else None
def forward(self,
obs: torch.Tensor
) -> tuple:
return self.step(obs)
def step(self,
obs: torch.Tensor
) -> tuple:
""" Produce action, value, log_prob(action).
If training, this includes exploration noise!
Expects that obs is not pre-processed.
Note:
Training mode can be activated with ac.train()
Evaluation mode is activated by ac.eval()
"""
with torch.no_grad():
if self.obs_oms:
# Note: Update RMS in Algorithm.running_statistics() method
# self.obs_oms.update(obs) if self.training else None
obs = self.obs_oms(obs)
v = self.v(obs)
if self.training:
a, logp_a = self.pi.sample(obs)
else:
a, logp_a = self.pi.predict(obs)
return a.numpy(), v.numpy(), logp_a.numpy()
def act(self,
obs: torch.Tensor
) -> np.ndarray:
return self.step(obs)[0]
def update(self, frac):
"""update internals of actors
1) Updates exploration parameters
+ for Gaussian actors update log_std
frac: progress of epochs, i.e. current epoch / total epochs
e.g. 10 / 100 = 0.1
"""
if hasattr(self.pi, 'set_log_std'):
self.pi.set_log_std(1 - frac)
class ActorCriticWithCosts(ActorCritic):
def __init__(
self,
**kwargs
):
super().__init__(**kwargs)
self.c = MLPCritic(
obs_dim=self.obs_shape[0],
shared=None,
**self.ac_kwargs['val'])
def step(self,
obs: torch.Tensor
) -> tuple:
""" Produce action, value, log_prob(action).
If training, this includes exploration noise!
Note:
Training mode can be activated with ac.train()
Evaluation mode is activated by ac.eval()
"""
with torch.no_grad():
if self.obs_oms:
# Note: do the updates at the end of batch!
# self.obs_oms.update(obs) if self.training else None
obs = self.obs_oms(obs)
v = self.v(obs)
c = self.c(obs)
if self.training:
a, logp_a = self.pi.sample(obs)
else:
a, logp_a = self.pi.predict(obs)
return a.numpy(), v.numpy(), c.numpy(), logp_a.numpy()
class Buffer:
def __init__(self,
actor_critic: torch.nn.Module,
obs_dim: tuple,
act_dim: tuple,
size: int,
gamma: float,
lam: float,
adv_estimation_method: str,
use_scaled_rewards: bool,
standardize_env_obs: bool,
standardize_advantages: bool,
lam_c: float = 0.95,
use_reward_penalty: bool = False
):
"""
A buffer for storing trajectories experienced by an agent interacting
with the environment, and using Generalized Advantage Estimation (GAE)
for calculating the advantages of state-action pairs.
Important Note: Buffer collects only raw data received from environment.
"""
self.actor_critic = actor_critic
self.size = size
self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros(size, dtype=np.float32)
self.discounted_ret_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
"""Multiclass predictions.
``y_pred`` should be two dimensional (n_samples x n_classes).
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 3 clause
import numpy as np
import warnings
from .base import BasePrediction
def _multiclass_init(self, y_pred=None, y_true=None, n_samples=None):
if y_pred is not None:
self.y_pred = np.array(y_pred)
elif y_true is not None:
self._init_from_pred_labels(y_true)
elif n_samples is not None:
self.y_pred = np.empty((n_samples, self.n_columns), dtype=float)
self.y_pred.fill(np.nan)
else:
raise ValueError(
'Missing init argument: y_pred, y_true, or n_samples')
self.check_y_pred_dimensions()
def _init_from_pred_labels(self, y_pred_labels):
"""Initalize y_pred to uniform for (positive) labels in y_pred_labels.
Initialize multiclass Predictions from ground truth. y_pred_labels
can be a single (positive) label in which case the corresponding
column gets probability of 1.0. In the case of multilabel (k > 1
    positive labels), the columns corresponding to the positive labels
get probabilities 1/k.
Parameters
----------
y_pred_labels : list of objects or list of list of objects
(of the same type)
"""
type_of_label = type(self.label_names[0])
self.y_pred = np.zeros(
(len(y_pred_labels), len(self.label_names)), dtype=np.float64)
for ps_i, label_list in zip(self.y_pred, y_pred_labels):
# converting single labels to list of labels, assumed below
if type(label_list) != np.ndarray and type(label_list) != list:
label_list = [label_list]
label_list = list(map(type_of_label, label_list))
for label in label_list:
ps_i[self.label_names.index(label)] = 1.0 / len(label_list)
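# Example (sketch): with label_names = ['a', 'b', 'c'],
# _init_from_pred_labels(['b', ['a', 'c']]) yields
# y_pred = [[0., 1., 0.], [0.5, 0., 0.5]] (a 1/k share per positive label).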
@property
def _y_pred_label_index(self):
"""Multi-class y_pred is the index of the predicted label."""
return np.argmax(self.y_pred, axis=1)
@property
def _y_pred_label(self):
return self.label_names[self.y_pred_label_index]
@classmethod
def _combine(cls, predictions_list, index_list=None):
if index_list is None: # we combine the full list
index_list = range(len(predictions_list))
y_comb_list = np.array(
[predictions_list[i].y_pred for i in index_list])
# clipping probas into [0, 1], also taking care of the case of all zeros
y_comb_list = np.clip(y_comb_list, 10 ** -15, 1 - 10 ** -15)
# normalizing probabilities
    y_comb_list = y_comb_list / np.sum(y_comb_list, axis=2, keepdims=True)
from typing import Dict, Set, Union
import numpy as np
from pydrake.all import ModelInstanceIndex, MultibodyPlant
from qsim.simulator import QuasistaticSimulator
from quasistatic_simulator_py import (QuasistaticSimulatorCpp)
from .dynamical_system import DynamicalSystem
class QuasistaticDynamics(DynamicalSystem):
def __init__(self, h: float, q_sim_py: QuasistaticSimulator,
q_sim: QuasistaticSimulatorCpp):
super().__init__()
self.h = h
self.q_sim_py = q_sim_py
self.q_sim = q_sim
self.plant = q_sim.get_plant()
self.dim_x = self.plant.num_positions()
self.dim_u = q_sim.num_actuated_dofs()
self.models_all = self.q_sim.get_all_models()
self.models_actuated = self.q_sim.get_actuated_models()
self.models_unactuated = self.q_sim.get_unactuated_models()
# TODO: distinguish between position indices and velocity indices for
# 3D systems.
self.position_indices = self.q_sim.get_velocity_indices()
self.velocity_indices = self.position_indices
# make sure that q_sim_py and q_sim have the same underlying plant.
self.check_plants(
plant_a=q_sim.get_plant(),
plant_b=q_sim_py.get_plant(),
models_all_a=q_sim.get_all_models(),
models_all_b=q_sim_py.get_all_models(),
velocity_indices_a=q_sim.get_velocity_indices(),
velocity_indices_b=q_sim.get_velocity_indices())
@staticmethod
def check_plants(plant_a: MultibodyPlant, plant_b: MultibodyPlant,
models_all_a: Set[ModelInstanceIndex],
models_all_b: Set[ModelInstanceIndex],
velocity_indices_a: Dict[ModelInstanceIndex, np.ndarray],
velocity_indices_b: Dict[ModelInstanceIndex, np.ndarray]):
"""
Make sure that plant_a and plant_b are identical.
"""
assert models_all_a == models_all_b
for model in models_all_a:
name_a = plant_a.GetModelInstanceName(model)
name_b = plant_b.GetModelInstanceName(model)
assert name_a == name_b
idx_a = velocity_indices_a[model]
idx_b = velocity_indices_b[model]
            assert np.array_equal(idx_a, idx_b)
def get_u_indices_into_x(self):
u_indices = np.zeros(self.dim_u, dtype=int)
i_start = 0
for model in self.models_actuated:
indices = self.velocity_indices[model]
n_a_i = len(indices)
u_indices[i_start: i_start + n_a_i] = indices
i_start += n_a_i
return u_indices
def get_q_a_cmd_dict_from_u(self, u: np.ndarray):
q_a_cmd_dict = dict()
i_start = 0
for model in self.models_actuated:
n_v_i = self.plant.num_velocities(model)
q_a_cmd_dict[model] = u[i_start: i_start + n_v_i]
i_start += n_v_i
return q_a_cmd_dict
def get_q_dict_from_x(self, x: np.ndarray):
q_dict = {
model: x[n_q_indices]
for model, n_q_indices in self.position_indices.items()}
return q_dict
def get_x_from_q_dict(self, q_dict: Dict[ModelInstanceIndex, np.ndarray]):
x = np.zeros(self.dim_x)
for model, n_q_indices in self.position_indices.items():
x[n_q_indices] = q_dict[model]
return x
def get_u_from_q_cmd_dict(self,
q_cmd_dict: Dict[ModelInstanceIndex, np.ndarray]):
u = np.zeros(self.dim_u)
i_start = 0
for model in self.models_actuated:
n_v_i = self.plant.num_velocities(model)
u[i_start: i_start + n_v_i] = q_cmd_dict[model]
i_start += n_v_i
return u
def get_Q_from_Q_dict(self,
Q_dict: Dict[ModelInstanceIndex, np.ndarray]):
Q = np.eye(self.dim_x)
for model, idx in self.velocity_indices.items():
Q[idx, idx] = Q_dict[model]
return Q
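    # Example (sketch): with two models whose velocity indices are [0, 1] and
    # [2], and Q_dict entries [2., 2.] and [5.], get_Q_from_Q_dict returns
    # diag(2., 2., 5.) -- the fancy-indexed assignment touches diagonals only.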
def get_R_from_R_dict(self,
R_dict: Dict[ModelInstanceIndex, np.ndarray]):
R = np.eye(self.dim_u)
i_start = 0
for model in self.models_actuated:
n_v_i = self.plant.num_velocities(model)
R[i_start: i_start + n_v_i, i_start: i_start + n_v_i] = \
                np.diag(R_dict[model])
class input_data:
## Value Initilization
import numpy as np
from matplotlib import pyplot as plt
import math
# input data for temperature profile calculation
rf = (4.18/2)*1E-3
dr = 1E-4 # mesh spacing
alphat = 1E-6 # alpha only contains 1/rho*Cp
dt_max = (dr ** 2) / (2 * alphat) # stability condition
dt = dt_max / 10 # time step size
sp = int(rf/dr) # no of spatial points
time_step = int(1E4) # time points
kc = 29.0 # clad conductance
kg = 58.22E-3 # conductance in gap between clad and gap
G = 427E-6 # gap
hg = kg / G # gap conductance
linear_heat_rate = 400*1E2
Qe = linear_heat_rate/(3.14*rf**2)
# Parameter for calculation of k
k = [2 for i in range(sp + 1)] # initial array of k
x = 2.0 - 1.97 # 1.97 is O/M ratio
A = 2.85 * x + 0.035
B = -0.715 * x + 0.286
space = [i for i in range(1, sp)]
# Input data for crack surface area calculation
a = 5E-6 # grain radius
R = rf-0.09E-3 # radius of the pellet
H = 14.0E-3 # height of the pellet
thickness_of_annuli = 0.5E-3
no_of_annuli = int(R / thickness_of_annuli) # The pellet is divided in n annuli
q = 35 # E3 # LHR of pin (kW/m)
Nc = q / 2.0 # No of radial cracks
    S = np.zeros(no_of_annuli + 1)  # initialization of surface area
r = np.ones(no_of_annuli + 1)
r[0] = R
V = float((3.14 * (R ** 2) * H) / no_of_annuli)
temp_T = np.ones(no_of_annuli)
n = int(5E3)
s_v_gb_cummulative = 0
s_v_gb = np.zeros(n)
fc = np.ones(n)
Dfc = np.ones(n)
Q = np.ones(n)
DQ = np.ones(n)
Rf_dot = np.ones(n)
# time required for the establishment of grain bubble t_est
# n = int(1E2)
fc = np.ones(n)
Dfc = np.ones(n)
DQ = np.ones(n)
Rf_dot = np.zeros(n)
to = 1 * np.zeros(n)
tc = 1E2*np.ones(n)
rt = 1E-10*np.ones(n)
    # rt[1] = 1E-7
rt[1] = 1E-09
rc = np.ones(n)
rc[1] = 1E-7
Re_dot = np.zeros(n)
Re_dot[1] = 1E1
Rc_dot = np.zeros(n)
Rc_dot[1] = 1E1
# rt[1] = 1E-7
# rc[1] = 1E-7
a1 = 0.1
a2 = 2.2
omega = 4.1E-29
Del_S_by_S = np.zeros(n)
N_dot = np.zeros(n)
N = np.zeros(n)
phi1 = np.zeros(n)
u1 = np.zeros(n)
Rc_dot[1] = 1
tc_total = 1E1*np.ones(n)
# N_max = np.ones(n)
# N_max = np.zeros(n)
# data for grain area/volume calculation
alpha = 1E15 # fixed sink length (m-2)
s = 3E-10 # atomic jump distance
R_gas_constant = 1.987 # 1.9872036E-3 kcal/kelvin.mol # Gas constant
Z = 100 # n of sites around a point from which recombination is inevitable
rf1 = 0.5E-6
K = 2E-4 # defect production rate per atom
F = 1E15 # fission rate
    gamma = 0.5  # (check the value) free surface energy
theta = 50
fb = 0.25
beta = 1.65E18 # (check the value)generation rate of stable fission gas atoms(value checked..!!)
Pext = 0 # (check the value) external pressure on the fuel pellet (value checked..!!)
kb = 1.3807E-23 # J/kalvin#5.67E8 check value of boltzman constant
bvd = 1E-13 # bvd = bv*delta
# inputs for sv_cracks
lmbda = 3600* (np.array ([2.05E-9, 6.7401E-7, 1.5424E-6, 3.6624E-6, 2.12E-5, 4.415E-5, 6.73E-5, 1.05E-4, 1.515E-4,
7.35E-4, 7.55E-4, 8.17E-4, 3.01E-3, 3.75E-3]))
grainarea = np.ones(temp_T.size)
Diff = np.ones(temp_T.size)
rba = np.zeros(temp_T.size)
s_v_t = np.zeros(temp_T.size)
rbf = np.zeros(lmbda.size)
# Information of isotopes
no_of_iteration = int (1E6)
dt_nu = 5E-1
pin_failure_time = int (5E10)
reactor_shut_down_time = 2E10
isotopes = ['kr-85', 'xe-131m', 'xe-133', 'xe-133m', 'xe-135', 'kr-85m', 'kr-88', 'kr-83m', 'kr-87', 'xe-134m',
'xe-135m', 'xe-138', 'xe-137', 'kr-89']
production = 3600* 1E12 * np.array(
[0.05, 0.09, 17.30, 0.57, 18.70, 1.40, 3.20, 0.74, 2.50, 0.47, 4.45, 12.80, 14.80, 3.7])
# production_matrix = np.zeros([lmbda.size,lmbda.size])
# lmbda_matrix = np.zeros([lmbda.size,lmbda.size])
    sweep_to_volume = (60*3000*1E-6)/(4)  # 2000 * 60: conversion of 2 litre/min to cc/hour
    ## (200E-6 / (4))  # sweep rate = 0.5 m3/s, volume of the cover gas = 5 m3 (200E-6)
Na = np.zeros([lmbda.size])
Nb = np.zeros([lmbda.size])
    Na_inf = np.ones(lmbda.size)
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
from matplotlib import rcParams
from torch_geometric.data import Batch
from torch_geometric.data import DataLoader
from tqdm import tqdm
from nasbench301.surrogate_models import utils
from nasbench301.surrogate_models.gnn.gnn_utils import NASBenchDataset, Patience
from nasbench301.surrogate_models.gnn.models.deep_multisets import DeepMultisets
from nasbench301.surrogate_models.gnn.models.deeper_gnn import DeeperGCN
from nasbench301.surrogate_models.gnn.models.diff_pool import DiffPool
from nasbench301.surrogate_models.gnn.models.gincnn import GIN
from nasbench301.surrogate_models.gnn.models.vsgae_enc import GNNpred, GNNpred_classifier
from nasbench301.surrogate_models.surrogate_model import SurrogateModel
sns.set_style('whitegrid')
rcParams.update({'figure.autolayout': True})
class GNNSurrogateModel(SurrogateModel):
def __init__(self, gnn_type, data_root, log_dir, seed, model_config, data_config):
super(GNNSurrogateModel, self).__init__(data_root=data_root, log_dir=log_dir, seed=seed,
model_config=model_config, data_config=data_config)
self.device = torch.device('cpu')
# Instantiate dataloader to extract one batch in order to know the number of node features
test_queue = self.load_results_from_result_paths(['surrogate_models/test/results_fidelity_0/results_0.json'])
single_graph_batch = next(iter(test_queue))
# Instantiate the GNN
model = self.instantiate_gnn(gnn_type=gnn_type, num_node_features=single_graph_batch.num_node_features,
model_config=model_config)
self.model = model.to(self.device)
logging.info('Num Parameters {}'.format(sum(p.numel() for p in self.model.parameters() if p.requires_grad)))
def instantiate_gnn(self, gnn_type, num_node_features, model_config):
if gnn_type == 'gnn_gin':
model = GIN(dim_features=num_node_features,
dim_target=1, model_config=model_config)
elif gnn_type == 'gnn_diff_pool':
model = DiffPool(dim_features=num_node_features,
dim_target=1, model_config=model_config)
elif gnn_type == 'gnn_deep_multisets':
model = DeepMultisets(dim_features=num_node_features,
dim_target=1, model_config=model_config)
elif gnn_type == 'gnn_vs_gae':
model = GNNpred(dim_features=self.model_config['gnn_node_dimensions'],
dim_target=1, model_config=model_config)
elif gnn_type == 'gnn_vs_gae_classifier':
model = GNNpred_classifier(dim_features=self.model_config['gnn_node_dimensions'],
dim_target=1, model_config=model_config)
elif gnn_type == 'deeper_gnn':
model = DeeperGCN(dim_features=num_node_features, dim_target=1, model_config=model_config)
else:
raise NotImplementedError('Unknown gnn_type.')
return model
def load_results_from_result_paths(self, result_paths):
# Instantiate dataset
dataset = NASBenchDataset(root=self.data_root, model_config=self.model_config, result_paths=result_paths,
config_loader=self.config_loader)
# Create dataloader
dataloader = DataLoader(dataset, batch_size=self.model_config['batch_size'], pin_memory=True)
return dataloader
def train(self):
if self.model_config['loss_function'] == 'L1':
criterion = torch.nn.L1Loss()
elif self.model_config['loss_function'] == 'L2':
criterion = torch.nn.MSELoss()
elif self.model_config['loss_function'] == 'HUBER':
criterion = torch.nn.SmoothL1Loss()
else:
raise NotImplementedError('Unknown loss function used.')
# Create early stopper
early_stopper = Patience(patience=30, use_loss=True)
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.model_config['learning_rate'])
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, self.model_config['epochs'], eta_min=self.model_config['learning_rate_min'])
# Load training data
train_queue = self.load_results_from_result_paths(self.train_paths)
valid_queue = self.load_results_from_result_paths(self.val_paths)
# Start train loop
for epoch in tqdm(range(self.model_config['epochs'])):
logging.info('Starting epoch {}'.format(epoch))
lr = scheduler.get_last_lr()[0]
# training
train_obj, train_results = self.train_epoch(train_queue, valid_queue, self.model, criterion, optimizer, lr,
epoch)
logging.info('train metrics: %s', train_results)
scheduler.step()
# validation
valid_obj, valid_results = self.infer(train_queue, valid_queue, self.model, criterion, optimizer, lr, epoch)
logging.info('validation metrics: %s', valid_results)
# save the model
# self.save()
# Early Stopping
if early_stopper is not None and early_stopper.stop(epoch, val_loss=valid_obj,
val_acc=valid_results["kendall_tau"]):
logging.info(
'Early Stopping at epoch {}, best is {}'.format(epoch, early_stopper.get_best_vl_metrics()))
break
return valid_results
def normalize_data(self, val_accuracy, val_min=None):
if val_min is None:
return torch.log(1 - val_accuracy)
else:
return torch.log(1 - val_accuracy / val_min)
def unnormalize_data(self, normalized_accuracy):
return 1 - np.exp(normalized_accuracy)
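    # Numeric sketch: normalize_data(torch.tensor(0.9)) = log(0.1) ~ -2.303,
    # and unnormalize_data(-2.303) ~ 0.9; the log transform spreads accuracies
    # near 1 apart so the regression target is better conditioned.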
def create_bins(self, lower_bound, width, quantity):
bins = []
for low in range(lower_bound,
lower_bound + quantity * width + 1, width):
bins.append((low, low + width))
return bins
def find_bin(self, value, bins):
for i in range(0, len(bins)):
if bins[i][0] <= value < bins[i][1]:
return i
return -1
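    # Example (sketch): create_bins(lower_bound=0, width=10, quantity=9) gives
    # [(0, 10), (10, 20), ..., (90, 100)], and find_bin(37, bins) -> 3.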
def train_epoch(self, train_queue, valid_queue, model, criterion, optimizer, lr, epoch):
objs = utils.AvgrageMeter()
# TRAINING
preds = []
targets = []
model.train()
for step, graph_batch in enumerate(train_queue):
graph_batch = graph_batch.to(self.device)
# print(step)
if self.model_config['model'] == 'gnn_vs_gae_classifier':
pred_bins, pred = self.model(graph_batch=graph_batch)
criterion = torch.nn.BCELoss()
criterion_2 = torch.nn.MSELoss()
bins = self.create_bins(lower_bound=0,
width=10,
quantity=9)
binned_weights = []
for value in graph_batch.y.cpu().numpy():
bin_index = self.find_bin(value, bins)
binned_weights.append(bin_index)
bins = torch.FloatTensor(binned_weights)
make_one_hot = lambda index: torch.eye(self.model_config['no_bins'])[index.view(-1).long()]
binns_one_hot = make_one_hot(bins).to(self.device)
loss_1 = criterion(pred_bins, binns_one_hot)
loss_2 = criterion_2(pred, self.normalize_data(graph_batch.y))
alpha = self.model_config['classification_loss']
beta = self.model_config['regression_loss']
loss = alpha * loss_1 + beta * loss_2
else:
pred = self.model(graph_batch=graph_batch)
if self.model_config['loss:loss_log_transform']:
loss = criterion(self.normalize_data(pred), self.normalize_data(graph_batch.y / 100))
else:
loss = criterion(pred, graph_batch.y / 100)
if self.model_config['loss:pairwise_ranking_loss']:
m = 0.1
'''
y = list(map(lambda y_i: 1 if y_i == True else -1, graph_batch.y[0: -1] > graph_batch.y[1:]))
pairwise_ranking_loss = torch.nn.HingeEmbeddingLoss(margin=m)(pred[0:-1] - pred[1:],
target=torch.from_numpy(np.array(y)))
'''
pairwise_ranking_loss = []
sort_idx = torch.argsort(graph_batch.y, descending=True)
for idx, idx_y_i in enumerate(sort_idx):
for idx_y_i_p1 in sort_idx[idx + 1:]:
pairwise_ranking_loss.append(torch.max(torch.tensor(0.0, dtype=torch.float),
m - (pred[idx_y_i] - pred[idx_y_i_p1])))
pairwise_ranking_loss = torch.mean(torch.stack(pairwise_ranking_loss))
loss += pairwise_ranking_loss
if step % self.data_config['report_freq'] == 0:
logging.info('Pairwise ranking loss {}'.format(pairwise_ranking_loss))
preds.extend(pred.detach().cpu().numpy() * 100)
targets.extend(graph_batch.y.detach().cpu().numpy())
optimizer.zero_grad()
loss.backward()
optimizer.step()
n = graph_batch.num_graphs
objs.update(loss.data.item(), n)
if step % self.data_config['report_freq'] == 0:
logging.info('train %03d %e', step, objs.avg)
fig = utils.scatter_plot(np.array(preds), np.array(targets), xlabel='Predicted', ylabel='True', title='')
fig.savefig(os.path.join(self.log_dir, 'pred_vs_true_train_{}.jpg'.format(epoch)))
plt.close()
train_results = utils.evaluate_metrics(np.array(targets), np.array(preds), prediction_is_first_arg=False)
return objs.avg, train_results
def infer(self, train_queue, valid_queue, model, criterion, optimizer, lr, epoch):
objs = utils.AvgrageMeter()
# VALIDATION
preds = []
targets = []
model.eval()
for step, graph_batch in enumerate(valid_queue):
graph_batch = graph_batch.to(self.device)
if self.model_config['model'] == 'gnn_vs_gae_classifier':
pred_bins, pred = self.model(graph_batch=graph_batch)
criterion = torch.nn.BCELoss()
criterion_2 = torch.nn.MSELoss()
bins = self.create_bins(lower_bound=0,
width=10,
quantity=9)
binned_weights = []
for value in graph_batch.y.cpu().numpy():
bin_index = self.find_bin(value, bins)
binned_weights.append(bin_index)
bins = torch.FloatTensor(binned_weights)
make_one_hot = lambda index: torch.eye(self.model_config['no_bins'])[index.view(-1).long()]
binns_one_hot = make_one_hot(bins).to(self.device)
loss_1 = criterion(pred_bins, binns_one_hot)
loss_2 = criterion_2(pred, self.normalize_data(graph_batch.y))
alpha = self.model_config['classification_loss']
beta = self.model_config['regression_loss']
loss = alpha * loss_1 + beta * loss_2
else:
pred = self.model(graph_batch=graph_batch)
loss = criterion(self.normalize_data(pred), self.normalize_data(graph_batch.y / 100))
preds.extend(pred.detach().cpu().numpy() * 100)
targets.extend(graph_batch.y.detach().cpu().numpy())
n = graph_batch.num_graphs
objs.update(loss.data.item(), n)
if step % self.data_config['report_freq'] == 0:
logging.info('valid %03d %e ', step, objs.avg)
fig = utils.scatter_plot(np.array(preds), np.array(targets), xlabel='Predicted', ylabel='True', title='')
fig.savefig(os.path.join(self.log_dir, 'pred_vs_true_valid_{}.jpg'.format(epoch)))
plt.close()
val_results = utils.evaluate_metrics(np.array(targets), np.array(preds), prediction_is_first_arg=False)
return objs.avg, val_results
def test(self):
preds = []
targets = []
self.model.eval()
test_queue = self.load_results_from_result_paths(self.test_paths)
for step, graph_batch in enumerate(test_queue):
graph_batch = graph_batch.to(self.device)
if self.model_config['model'] == 'gnn_vs_gae_classifier':
pred_bins, pred = self.model(graph_batch=graph_batch)
else:
pred = self.model(graph_batch=graph_batch)
preds.extend(pred.detach().cpu().numpy() * 100)
targets.extend(graph_batch.y.detach().cpu().numpy())
fig = utils.scatter_plot(np.array(preds), np.array(targets), xlabel='Predicted', ylabel='True', title='')
fig.savefig(os.path.join(self.log_dir, 'pred_vs_true_test.jpg'))
plt.close()
        test_results = utils.evaluate_metrics(np.array(targets), np.array(preds), prediction_is_first_arg=False)
"""Radio Resource Allocation dataset obtention and processing.
This script generates the radio resource management synthetic dataset into Networkx graphs, to use
in iGNNition example.
For the script to work, the following additional packages are needed:
- tqdm -> https://github.com/tqdm/tqdm
References:
- <NAME>, <NAME>, <NAME> and <NAME>,
"Graph Neural Networks for Scalable Radio Resource Management: Architecture Design and
Theoretical Analysis", in IEEE Journal on Selected Areas in Communications, vol. 39, no. 1,
pp. 101-115, Jan. 2021 doi: 10.1109/JSAC.2020.3036965.
- <NAME>, <NAME> and <NAME>,
"Group Sparse Beamforming for Green Cloud-RAN," in IEEE Transactions on Wireless Communications,
vol. 13, no. 5, pp. 2809-2823, May 2014, doi: 10.1109/TWC.2014.040214.131770.
"""
import json
import math
import networkx as nx
import numpy as np
import os
import shutil
from itertools import product
from pathlib import Path
from tqdm.auto import tqdm
# Generation options
empty_dirs = True
n_links = 10
random_seed = 20210205
root_path = Path(__file__).parent
raw_dir = root_path / Path("data/raw_"+str(n_links))
train_dir = root_path / Path("data/train_"+str(n_links))
train_samples = 1000
validation_dir = root_path / Path("data/validation_"+str(n_links))
validation_samples = 100
# Computed
total_samples = train_samples + validation_samples
rng = np.random.default_rng(seed=random_seed)
# Dataset options, please see the referenced papers for more details
field_length = 1000
shortest_directLink_length = 2
longest_directLink_length = 65
shortest_crossLink_length = 1
carrier_f = 2.4e9
tx_height = 1.5
rx_height = 1.5
# Channel loss options
signal_lambda = 2.998e8 / carrier_f
Rbp = 4 * tx_height * rx_height / signal_lambda
Lbp = abs(20 * np.log10(np.power(signal_lambda, 2) / (8 * np.pi * tx_height * rx_height)))
antenna_gain_decibel = 2.5
# Network additional inputs
noise_power = 4e-12
def _empty_dirs(dirs=None):
if dirs is None:
return
elif isinstance(dirs, (Path, str)):
dirs = [Path(dirs)]
for _dir in dirs:
assert isinstance(_dir, Path)
for file in [f for f in _dir.glob("*") if f.is_file()]:
file.unlink()
def compute_losses(distances, add_shadowing=True, add_fast_fading=True):
N = np.shape(distances)[-1]
assert N == n_links
# compute coefficient matrix for each Tx/Rx pair
sum_term = 20 * np.log10(distances / Rbp)
# adjust for longer path loss
Tx_over_Rx = Lbp + 6 + sum_term + ((distances > Rbp).astype(int)) * sum_term
# only add antenna gain for direct channel
path_losses = -Tx_over_Rx + np.eye(N) * antenna_gain_decibel
path_losses = np.power(10, (path_losses / 10)) # convert from decibel to absolute
# Compute channel losses, if specified
channel_losses = np.copy(path_losses)
if add_shadowing:
shadow_coefficients = rng.normal(loc=0, scale=8, size=np.shape(channel_losses))
channel_losses = channel_losses * np.power(10.0, shadow_coefficients / 10)
if add_fast_fading:
fast_fadings = (
np.power(rng.normal(loc=0, scale=1, size=np.shape(channel_losses)), 2) +
np.power(rng.normal(loc=0, scale=1, size=np.shape(channel_losses)), 2)
) / 2
channel_losses = channel_losses * fast_fadings
# Join non-diagonal path with diagonal channel losses
mask = np.eye(N)
off_diag_path = path_losses - np.multiply(mask, path_losses)
diag_channel = np.multiply(mask, channel_losses)
path_losses = diag_channel + off_diag_path
return path_losses, channel_losses
def generate_layout():
"""Generate a single graph."""
N = n_links
# first, generate transmitters' coordinates
tx_xs = rng.uniform(low=0, high=field_length, size=[N, 1])
tx_ys = rng.uniform(low=0, high=field_length, size=[N, 1])
while True: # loop until a valid layout generated
# generate rx one by one rather than N together to ensure checking validity one by one
rx_xs = []
rx_ys = []
for i in range(N):
got_valid_rx = False
while not got_valid_rx:
pair_dist = rng.uniform(
low=shortest_directLink_length,
high=longest_directLink_length,
)
pair_angles = rng.uniform(low=0, high=np.pi * 2)
rx_x = tx_xs[i] + pair_dist * np.cos(pair_angles)
rx_y = tx_ys[i] + pair_dist * np.sin(pair_angles)
if (
0 <= rx_x <= field_length
and 0 <= rx_y <= field_length
):
got_valid_rx = True
rx_xs.append(rx_x)
rx_ys.append(rx_y)
# For now, assuming equal weights and equal power, so not generating them
layout = np.concatenate((tx_xs, tx_ys, rx_xs, rx_ys), axis=1)
distances = np.zeros([N, N])
# compute distance between every possible Tx/Rx pair
for rx_index in range(N):
for tx_index in range(N):
tx_coor = layout[tx_index][0:2]
rx_coor = layout[rx_index][2:4]
# according to paper notation convention
# Hij is from jth transmitter to ith receiver
distances[rx_index][tx_index] = np.linalg.norm(tx_coor - rx_coor)
# Check whether a tx-rx link (potentially cross-link) is too close
if np.min(distances) > shortest_crossLink_length:
break
return layout, distances
def generate_graphs(output_dir="data/raw", output_prefix="network", empty_dirs=False):
"""Generate all graphs for the dataset.
This method generates Networkx graphs for the Radio Resource Allocation problem, from the module
options, and saves them to the specified directory as JSON file, which will then be stacked to
form the dataset.
"""
if empty_dirs:
_empty_dirs(output_dir)
N = n_links
print(f"Generating {total_samples} network graphs in {output_dir}.")
for i in tqdm(range(total_samples)):
layout, dist = generate_layout()
path_loss, channel_loss = compute_losses(dist)
assert np.shape(layout) == (N, 4)
assert np.shape(dist) == np.shape(path_loss) == np.shape(channel_loss) == (N, N)
diag_dist = np.diag(1/dist)
diag_channel_loss = np.diag(channel_loss)
adjacency = channel_loss - np.multiply(np.eye(N), channel_loss) # Remove own pair
weights = rng.uniform(size=N) # Transceiver-receiver random weights
weights = weights / weights.sum() # Normalize weights
wmmse_power = get_wmmse_power(channel_loss, noise_power)
graph = nx.DiGraph()
        # We add as node attribute a placeholder per-node power label to use as
        # target, although the loss we plan to use will be self-supervised.
graph.add_nodes_from([
(link_idx, {
"entity": "transmitter_receiver_pair",
"transceiver_x": layout[link_idx, 0],
"transceiver_y": layout[link_idx, 1],
"receiver_x": layout[link_idx, 2],
"receiver_y": layout[link_idx, 3],
"receiver_distance": diag_dist[link_idx],
"channel_loss": diag_channel_loss[link_idx],
"path_loss": path_loss[:, link_idx].tolist(),
"power": 0,
"weights": weights[link_idx],
"wmmse_power": wmmse_power[link_idx],
})
for link_idx in range(N)
])
graph.add_edges_from([
(src, dst, {
"transceiver_receiver_loss": adjacency[src, dst]
})
for src, dst in product(range(N), range(N)) if src != dst
])
graph.graph["noise_power"] = noise_power
filepath = Path(output_dir) / f"{output_prefix}_{i}.json"
with filepath.open("w") as _f:
json.dump(nx.readwrite.json_graph.node_link_data(graph), _f)
print(f"Finished generating {total_samples} network graphs in {output_dir}.")
def get_wmmse_power(channel_loss, noise_power, max_iterations=100):
"""Get WMMSE optimimum power aproximation for given matrix of channel losses and noise power."""
H = channel_loss
K = np.shape(channel_loss)[0]
    P_ini = np.random.rand(K, 1)
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 26 00:32:31 2020
@author: <NAME>
based on code by <NAME>
"""
import numpy as np
from sklearn.cross_decomposition import PLSRegression
# OSC
# nicomp is the number of internal components, ncomp is the number of
# components to remove (ncomp=1 recommended)
class OSC:
def __init__(self,version="SWosc",nicomp=18,ncomp=1,epsilon = 10e-6, max_iters = 20):
self.version=version
self.nicomp=nicomp
self.ncomp=ncomp
self.epsilon=epsilon
        self.max_iters = max_iters
def fit(self,xx,y):
X=xx.copy()
Y=y.copy()
# Separating X from Y for PLS
# Needs to be converted to numpy array from pandas df
#X=self.df[self.freqs].to_numpy()
# Y need to be converted to numpy array from pandas series and reshaped to (N,1) from (N,)
#Y=self.df[self.y_name].to_numpy().reshape(-1, 1)
# Self developed version
if self.version=="SWosc":
#Centering data
A=np.identity(n = X.shape[0]) / X.shape[0]
mu_x = ((A.dot(X)).sum(axis=0))/ A.sum()
mu_y = ((A.dot(Y)).sum(axis=0))/ A.sum()
Xc = X - mu_x
Yc = Y - mu_y
#matrices to store loading vectors
            W = np.zeros((X.shape[1], self.ncomp))
from functools import wraps
from typing import Callable, TypeVar, cast
import numpy as np
from numba import njit
from numfun.barycentric import barycentric_interpolation
F = TypeVar('F', bound=Callable)
def complexify(g: F) -> F:
"""Decorator to apply g on real and imaginary parts and the return the sum.
:param g: A linear operator such that g(a+ib) = g(a) + i g(b)
:return: a function which adds g(real(input)) + 1j * g(imag(input))
"""
@wraps(g)
def wrapper(coefficients: np.ndarray) -> np.ndarray:
"""coefficients is a complex input array."""
        # Make sure coefficients is a numpy array (promoted to float/complex)
coefficients = 1.0 * np.array(coefficients)
if np.all(np.isreal(coefficients)):
return g(coefficients.real)
if np.all(np.isreal(1j * coefficients)):
return 1j * g(coefficients.imag)
u = g(coefficients.real)
v = g(coefficients.imag)
return u + 1j * v
return cast(F, wrapper)
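# Illustrative example (not from the original source): complexify() extends any
# real-linear operator to complex arrays, e.g. a simple doubling map:
#
#     >>> double = complexify(lambda c: 2.0 * c)
#     >>> double(np.array([1.0 + 2.0j]))
#     array([2.+4.j])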
@complexify
@njit
def chebyshev_coefficients_of_integral(coefficients: np.array) -> np.array:
"""Indefinite integral of a Function with given Chebyshev coefficients
such that f(-1) = 0.
NOTE: The algorithm works for complex coefficients, but for jit to
work, we have to wrap this in the @complexify decorator
###########################################################################
If the underlying function is represented as a n-vector c[k]:
\sum_{k=0}^{n-1} c_k T_k(x)
its integral is represented as a vector of length n+1 given by:
\sum_{k=0}^{n} b_k T_k(x)
where b_0 is determined from the constant of integration as
b_0 = \sum_{k=1}^{n} (-1)^(k+1) b_k
and other coefficients are given by
b_1 = c_0 - c_2/2,
    b_k = (c_{k-1} - c_{k+1})/(2k) if 1 < k \leq n,
with c_{n+1} = c_{n+2} = 0.
Pages 32-33 of Mason & Handscomb,
"Chebyshev Polynomials". Chapman & Hall/CRC (2003).
###########################################################################
"""
# Handle the empty case:
n = len(coefficients)
if n == 0:
return np.zeros((0,))
# Make room in c[k] with zeros
c = np.zeros((n + 2,))
c[:n] = coefficients
# Initialize vector b for the integral
b = np.zeros((n + 1,))
# values of b_(2) ... b_(n+1):
b[2:] = (c[1:n] - c[3:n + 2]) / (2.0 * np.arange(2, n + 1))
# value of b_1
b[1] = c[0] - c[2] / 2.0
v = np.ones((n,))
v[1::2] = -1.0
# value of b_0 such that f(-1) = 0
b[0] = np.dot(v, b[1:])
return b
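# Worked example for chebyshev_coefficients_of_integral() (a minimal check, not
# from the original source): f(x) = x has Chebyshev coefficients c = [0, 1];
# the antiderivative with F(-1) = 0 is F(x) = (x^2 - 1)/2 = -1/4 + (1/4) T_2(x):
#
#     >>> chebyshev_coefficients_of_integral(np.array([0.0, 1.0]))
#     array([-0.25,  0.  ,  0.25])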
@complexify
@njit
def chebyshev_definite_integral(coefficients: np.array) -> float:
"""Definite integral of a function on the interval [-1, 1]."""
n = len(coefficients)
# Get the length of the coefficients:
if n == 0: # Trivial cases:
return np.nan
if n == 1: # Constant Function
return 2.0 * coefficients[0]
# General case
c = np.zeros((n,))
c[:n] = coefficients
# Evaluate the integral using Chebyshev coefficients (see Thm. 19.2 of
# Trefethen, Approximation Theory and Approximation Practice, SIAM, 2013, which
# states that \int_{-1}^1 T_k(x) dx = 2/(1-k^2) for k even and zero for k odd).
c[1::2] = 0.0
# For jitted code, we have to do this slightly explicitly:
d = np.zeros((n,))
# k = 0 and k = 1 are handled separately
d[:2] = [2.0, 0.0]
d[2:n] = 2.0 / (1.0 - np.arange(2.0, n) ** 2)
return np.dot(d, c) # type: ignore # numba
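# Worked example for chebyshev_definite_integral() (a minimal check, not from
# the original source): f(x) = x^2 = (T_0(x) + T_2(x))/2 integrates to 2/3:
#
#     >>> chebyshev_definite_integral(np.array([0.5, 0.0, 0.5]))
#     0.6666666666666666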
@complexify
@njit
def chebyshev_coefficients_of_derivative(c: np.array) -> np.array:
"""Recurrence relation for coefficients of derivative. c is the array of coefficients of a Chebyshev series. c_out
is the array of coefficients for the derivative.
:param c: input coefficients
:return: c_out: coefficients of the derivative
"""
n = len(c)
# Empty and constant case
if n <= 1:
return np.zeros((n,))
c_out = np.zeros((n - 1,))
w = 2.0 * np.arange(1.0, n)
v = w * c[1:]
c_out[n - 2::-2] = v[n - 2::-2].cumsum()
c_out[n - 3::-2] = v[n - 3::-2].cumsum()
c_out[0] = 0.5 * c_out[0]
return c_out
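# Worked example for chebyshev_coefficients_of_derivative() (a minimal check,
# not from the original source): d/dx T_2(x) = 4x = 4 T_1(x), so the
# coefficients [0, 0, 1] map to [0, 4]:
#
#     >>> chebyshev_coefficients_of_derivative(np.array([0.0, 0.0, 1.0]))
#     array([0., 4.])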
def chebyshev_clenshaw_evaluation(x: np.array, coefficients: np.array) -> np.array:
"""A wrapper for chebyshev_clenshaw_evaluation_internal()"""
# Make sure x is cast to a numpy array
x = 1.0 * np.array(x)
# We only expect real x, so parametrise as a function of the coefficients c and
# use the complexified version of the evaluation
@complexify
def g(c: np.ndarray) -> np.ndarray:
return chebyshev_clenshaw_evaluation_internal(x, c)
return g(coefficients)
@njit
def chebyshev_clenshaw_evaluation_internal(x: np.array, c: np.array) -> np.array:
"""Clenshaw's algorithm for evaluating a Chebyshev series with real coefficients c at points x.
NOTE: the algorithm works for complex numbers, but since we are using jit, we restrict
this to reals. One can remove @njit and use this code directly for the general case
or use the wrapper chebyshev_clenshaw_evaluation() for general case.
c is assumed to be an array of real numbers
x is assumed to be an array
y is an array of values of the Chebyshev expansion with coefficients c at x
"""
# Clenshaw's algorithm for evaluating scalar-valued functions.
bk1 = 0.0 * x
bk2 = np.copy(bk1)
x = 2.0 * x
n = len(c)
for k in np.arange(n - 1, 1, -2):
bk2 = c[k] + x * bk1 - bk2
bk1 = c[k - 1] + x * bk2 - bk1
    if not np.mod(n, 2):
        # n even: one unpaired coefficient (c[1]) remains after the two-step loop
        tmp = bk1
        bk1 = c[1] + x * bk1 - bk2
        bk2 = tmp
    return c[0] + 0.5 * x * bk1 - bk2
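# Worked example for chebyshev_clenshaw_evaluation() (a minimal check, not from
# the original source): with c = [0, 0, 1] the series is T_2(x) = 2x^2 - 1:
#
#     >>> chebyshev_clenshaw_evaluation(np.array([-1.0, 0.0, 0.5, 1.0]), np.array([0.0, 0.0, 1.0]))
#     array([ 1. , -1. , -0.5,  1. ])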
import numpy as np
import scipy.sparse as sp
import time
import scipy.linalg as la
class CLPerceptron():
S_X = np.array([[0, 1], [1, 0]], dtype=complex)
S_Y = np.array([[0, complex(0, -1)], [complex(0, 1), 0]], dtype=complex)
S_Z = np.array([[1, 0], [0, -1]], dtype=complex)
S = np.array([S_X, S_Y, S_Z])
def __init__(self, D, y, bias=False, manual_lookup=False):
self.D = D
self.bias = bias
self.y = y
self.n_samples = D.shape[0]
if bias:
self.add_bias()
self.dim = self.D.shape[1]
if not manual_lookup:
self._create_statistics_lookup_table()
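    def add_bias(self):
        # Assumed helper (not shown in the original snippet): append a constant
        # bias column to the data matrix, mirroring the handling in predict().
        self.D = np.hstack([self.D, np.ones(self.n_samples).reshape(-1, 1)])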
def _create_statistics_lookup_table(self):
self.bx_lookup = np.zeros(self.n_samples)
self.qx_lookup = np.zeros(self.n_samples)
# gather statistics for each sample, store these based on index
for i in range(self.n_samples):
self.bx_lookup[i] = self._bx(self.D, self.D[i, :], self.y)
self.qx_lookup[i] = self._qx(self.D, self.D[i, :])
def train(self, max_iter, eta, calculate_loss=False, tol=10e-8, verbose=True):
_w = np.random.uniform(low=-1, high=1, size=self.dim)
_loss = []
_lh = []
_lh.append(self.likelihood(_w))
for i in range(max_iter):
h = np.dot(self.D, _w)
h_x = np.sqrt(np.square(h))
_delta_z = self.qx_lookup * (self.bx_lookup - np.tanh(h_x) * (h / h_x))
_w += eta * np.einsum(_delta_z, [0, ], self.D, [0, 1], [1, ]) # reg - 10e-10 * np.sum(_w)
_lh.append(self.likelihood(_w))
            if abs(_lh[-1] - _lh[-2]) < tol:  # change in likelihood between the last two iterations
if verbose:
print("Convergence reached after {} steps".format(i))
self.w = _w
self.lh = _lh
self.loss = _loss
return
if verbose:
print("No convergence after {} steps!".format(max_iter))
self.w = _w
self.lh = _lh
self.loss = _loss
return
def predict(self, samples, ev=True):
def get_evalue(sample):
h = np.dot(self.w.T, sample)
p_one = 0.5 * (np.tanh(h) + 1)
return p_one, 1-p_one
# add bias if our training was done with bias
if self.bias:
samples = np.hstack([samples, np.ones(samples.shape[0]).reshape(-1, 1)])
# works similarly as calculate loss, but now returns the expectation value
p = np.apply_along_axis(get_evalue, axis=1, arr=samples)
if ev:
return p[:,0] - p[:,1]
return p[:,0], p[:,1]
def get_loss(self):
y_pred = self.predict(self.D)
loss = 0.5 * np.sum(np.absolute(y_pred - self.y))
return loss / self.n_samples
# def predict(self, _samples):
# return np.sign(np.dot(self.w, _samples.T))
def predict_sigm(self, _samples):
return self._sigmoid(np.dot(self.w, _samples.T))
    def _H_x(self, _x):
        # calculate the parameterised Hamiltonian, in the Pauli basis
        _h = np.dot(self.w.T, _x)
        _H = _h * CLPerceptron.S[2]
return _H
@staticmethod
def _bx(X, sample, y):
_idx = np.where((X == tuple(sample)).all(axis=1))[0]
return np.sum(y[_idx]) / len(_idx)
@staticmethod
def _qx(X, sample):
_idx = np.where((X == tuple(sample)).all(axis=1))[0]
return len(_idx) / X.shape[0]
def likelihood(self, _w):
h = np.dot(_w.T, self.D.T)
h_x = np.sqrt(np.square(h))
L = np.sum(self.qx_lookup * (h * self.bx_lookup - np.logaddexp(h_x, -h_x)))
return L
def _delta_w(self, idx):
h = np.dot(self.w.T, self.D[idx, :])
return self.qx_lookup[idx] * (self.bx_lookup[idx] - np.tanh(h))
@staticmethod
def _sigmoid(x):
        return 1 / (1 + np.exp(-x))
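# Illustrative usage sketch (hypothetical data, not from the original source):
# train on a small synthetic dataset with labels in {-1, +1}.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    D = rng.uniform(-1.0, 1.0, size=(200, 3))
    y = np.sign(D[:, 0])
    clf = CLPerceptron(D, y)
    clf.train(max_iter=200, eta=0.1, verbose=False)
    print("soft training error:", clf.get_loss())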
import torch
from torch.autograd import Variable
import torch.nn.init as init
import torch.nn.functional as F
import numpy as np
import os
from tqdm import tqdm
from harvester import HardestNegativeTripletSelector, AllTripletSelector
from utils import compute_eer
class TrainLoop(object):
def __init__(self, model, optimizer, train_loader, valid_loader, margin, lambda_, patience, verbose=-1, cp_name=None, save_cp=False, checkpoint_path=None, checkpoint_epoch=None, swap=False, cuda=True):
if checkpoint_path is None:
# Save to current directory
self.checkpoint_path = os.getcwd()
else:
self.checkpoint_path = checkpoint_path
if not os.path.isdir(self.checkpoint_path):
os.mkdir(self.checkpoint_path)
self.save_epoch_fmt = os.path.join(self.checkpoint_path, cp_name) if cp_name else os.path.join(self.checkpoint_path, 'checkpoint_{}ep.pt')
self.cuda_mode = cuda
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.valid_loader = valid_loader
self.history = {'train_loss': [], 'train_loss_batch': [], 'triplet_loss': [], 'triplet_loss_batch': [], 'ce_loss': [], 'ce_loss_batch': [],'ErrorRate': [], 'EER': []}
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, factor=0.5, patience=patience, verbose=True if verbose>0 else False, threshold=1e-4, min_lr=1e-8)
self.total_iters = 0
self.cur_epoch = 0
self.lambda_ = lambda_
self.swap = swap
self.margin = margin
self.harvester = HardestNegativeTripletSelector(margin=0.1, cpu=not self.cuda_mode)
self.harvester_val = AllTripletSelector()
self.verbose = verbose
self.save_cp = save_cp
self.device = next(self.model.parameters()).device
if checkpoint_epoch is not None:
self.load_checkpoint(self.save_epoch_fmt.format(checkpoint_epoch))
def train(self, n_epochs=1, save_every=1):
while self.cur_epoch < n_epochs:
np.random.seed()
if self.verbose>0:
print(' ')
print('Epoch {}/{}'.format(self.cur_epoch+1, n_epochs))
train_iter = tqdm(enumerate(self.train_loader))
else:
train_iter = enumerate(self.train_loader)
ce=0.0
triplet_loss=0.0
train_loss=0.0
# Train step
for t, batch in train_iter:
ce_batch, triplet_loss_batch = self.train_step(batch)
ce += ce_batch
triplet_loss += triplet_loss_batch
train_loss += ce_batch + triplet_loss_batch
self.history['train_loss_batch'].append(ce_batch + triplet_loss_batch)
self.history['triplet_loss_batch'].append(triplet_loss_batch)
self.history['ce_loss_batch'].append(ce_batch)
self.total_iters += 1
self.history['train_loss'].append(train_loss/(t+1))
self.history['triplet_loss'].append(triplet_loss/(t+1))
self.history['ce_loss'].append(ce/(t+1))
if self.verbose>0:
print(' ')
print('Total train loss, Triplet loss, and Cross-entropy: {:0.4f}, {:0.4f}, {:0.4f}'.format(self.history['train_loss'][-1], self.history['triplet_loss'][-1], self.history['ce_loss'][-1]))
# Validation
tot_correct = 0
tot_ = 0
scores, labels = None, None
for t, batch in enumerate(self.valid_loader):
correct, total, scores_batch, labels_batch = self.valid(batch)
                if scores is None:
                    scores, labels = scores_batch, labels_batch
                else:
                    scores = np.concatenate([scores, scores_batch], 0)
                    labels = np.concatenate([labels, labels_batch], 0)
tot_correct += correct
tot_ += total
self.history['EER'].append(compute_eer(labels, scores))
self.history['ErrorRate'].append(1.-float(tot_correct)/tot_)
if self.verbose>0:
print(' ')
                print('Current, best validation error rate, and epoch: {:0.4f}, {:0.4f}, {}'.format(self.history['ErrorRate'][-1], np.min(self.history['ErrorRate']), np.argmin(self.history['ErrorRate'])))
import numpy as np
from scipy.stats import linregress as li
from math import exp
def calc_factor(field,stepsize=0.01):
"""
    Function for calculating the summed (cumulative) binning.
    The returned result is an integral over the binning of the velocities,
    computed for the positive and negative halves separately.
:param field: is a 1D field which will be binned
:param stepsize: is the step size for the velocity
:return (positive,negative):
velocities and the binning result for positive half and negative half are returned
as a tuple of numpy arrays
"""
result_pos = []
result_neg = []
alpha = 0.
#: binning of the positive half
while alpha <= np.max(field)+stepsize:
pos = alpha
neg = 0.
filtered = np.copy(field)
filtered[filtered<=neg] = np.nan
filtered[filtered>pos] = np.nan
outlier = np.count_nonzero(~np.isnan(filtered))/np.float(np.count_nonzero(~np.isnan(field)))
result_pos.append([alpha,outlier])
alpha += stepsize
alpha = 0.
#: binning of the negative half
while alpha <= np.abs(np.min(field))+stepsize:
pos = 0.
neg = -1.*alpha
filtered = np.copy(field)
filtered[filtered<=neg] = np.nan
filtered[filtered>pos] = np.nan
outlier = np.count_nonzero(~np.isnan(filtered))/np.float(np.count_nonzero(~np.isnan(field)))
result_neg.append([-1.*alpha,outlier])
alpha += stepsize
return (np.array(result_pos),np.array(result_neg))
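# Illustrative example (hypothetical data, not from the original source): for a
# 1D field of standard-normal samples, the positive half returned by
# calc_factor() gives, for each alpha, the fraction of samples in (0, alpha]:
#
#     >>> rng = np.random.default_rng(0)
#     >>> pos, neg = calc_factor(rng.standard_normal(1000), stepsize=0.5)
#     >>> pos[:, 0]  # binning velocities 0.0, 0.5, 1.0, ...
#     >>> pos[:, 1]  # cumulative fraction of samples in (0, alpha]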
def calc_derivative(field,stepsize=0.01):
"""
Function for calculation of the binning.
The returned result is the binning of the velocities.
It is called derivative because it is mathematically the derivative of the function:
.. function:: velofilter.calc_factor
It is done for the negative and positive half separately.
:param field: is a 1D field which will be binned
:param stepsize: is the step size for the velocity
:return (positive,negative):
velocities and the binning result for positive half and negative half are returned
as a tuple
"""
result_pos = []
result_neg = []
outlier = 1.
alpha = 0.
while alpha <= np.max(field)+stepsize:
pos = alpha+stepsize
neg = alpha
filtered = np.copy(field)
filtered[(filtered<=neg) | (filtered>pos)] = np.nan
#filtered[filtered>pos] = np.nan
outlier = np.count_nonzero(~np.isnan(filtered))/np.float(np.count_nonzero(~np.isnan(field)))
result_pos.append([alpha,outlier])
alpha += stepsize
outlier = 1.
alpha = 0.
while alpha <= np.abs(np.min(field))+stepsize:
pos = -1.*alpha
neg = -1.*(alpha+stepsize)
filtered = np.copy(field)
filtered[(filtered<=neg) | (filtered>pos)] = np.nan
#filtered[filtered>pos] = np.nan
outlier = np.count_nonzero(~np.isnan(filtered))/np.float(np.count_nonzero(~np.isnan(field)))
result_neg.append([-1.*alpha,outlier])
alpha += stepsize
return (np.array(result_pos),np.array(result_neg))
def filter(piv,tfactor=3.,dalpha=.01):
"""
Function for calculating the cutoff values.
:param object piv: PIV class object
This is supposed to be an object from a Direct or adaptive Class
it is needed to get the velocities
:param double tfactor: Factor for cutoff in the velocity binning
        The default value is set to 3, which works for many cases
    :param double dalpha: value for the differential velocity
        The default is set to .01, which works for many cases;
        if the velocities vary over a larger range, use a larger value
"""
#: pre sampling
numberup = np.count_nonzero(piv.u<=0.)/np.float(np.count_nonzero(piv.u))
numberun = np.count_nonzero(piv.u>0.)/np.float(np.count_nonzero(piv.u))
numbervp = np.count_nonzero(piv.v<=0.)/np.float(np.count_nonzero(piv.v))
numbervn = np.count_nonzero(piv.v>0.)/np.float(np.count_nonzero(piv.v))
upos = numberup
uneg = numberun
vpos = numbervp
vneg = numbervn
#: get alpha dependency
up_alpha, un_alpha = calc_factor(piv.u,dalpha)
vp_alpha, vn_alpha = calc_factor(piv.v,dalpha)
#: calculate derivative directly from data
dup_alpha1, dun_alpha1 = calc_derivative(piv.u,dalpha)
dvp_alpha1, dvn_alpha1 = calc_derivative(piv.v,dalpha)
dup_alpha = dup_alpha1[:,1]
dun_alpha = dun_alpha1[:,1]
dvp_alpha = dvp_alpha1[:,1]
dvn_alpha = dvn_alpha1[:,1]
#get boundaries
boundup = np.sum(dup_alpha[0:5])/5./np.exp(tfactor)
boundun = np.sum(dun_alpha[0:5])/5./np.exp(tfactor)
boundvp = np.sum(dvp_alpha[0:5])/5./np.exp(tfactor)
    boundvn = np.sum(dvn_alpha[0:5])/5./np.exp(tfactor)
#!/usr/bin/env python
u"""
read_cryosat_L1b.py
Written by <NAME> (02/2020)
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
OUTPUTS:
Location: Time and Orbit Group
Data: Measurements Group
Geometry: External Corrections Group
Waveform_1Hz: Average Waveforms Group
Waveform_20Hz: Waveforms Group (with SAR/SARIN Beam Behavior Parameters)
METADATA: MPH, SPH and DSD Header data
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 02/2020: tilde-expansion of cryosat-2 files before opening
add scale factors function for converting packed units in binary files
convert from hard to soft tabulation
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
will output with same variable names as the binary read functions
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import os
import re
import netCDF4
import numpy as np
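#-- Usage sketch (illustrative; full_filename and n_records are hypothetical
#-- placeholders, with n_records normally parsed from the MPH/SPH/DSD headers):
#--
#--     with open(os.path.expanduser(full_filename), 'rb') as fid:
#--         CS_l1b_mds = cryosat_baseline_AB(fid, n_records, 'SIN')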
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baselines A and B
def cryosat_baseline_AB(fid, n_records, MODE):
    n_SARIN_RW = 512            #-- SARIN waveform bins
    n_SAR_RW = 128              #-- SAR waveform bins
    n_LRM_RW = 128              #-- LRM waveform bins
    n_blocks = 20               #-- 20 Hz measurement blocks per record
    n_BeamBehaviourParams = 50  #-- number of beam behaviour parameter words
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100): converted from telemetry units to be
#-- the noise floor of FBR measurement echoes.
#-- Set to -9999.99 when the telemetry contains zero.
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
#-- CryoSat-2 mode specific waveforms
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [512]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [512]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
#-- Phase Difference [512]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
                Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
                Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-5))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline C
def cryosat_baseline_C(fid, n_records, MODE):
    n_SARIN_BC_RW = 1024        #-- SARIN waveform bins (extended for baseline C)
    n_SARIN_RW = 512            #-- SARIN 1 Hz waveform bins
    n_SAR_BC_RW = 256           #-- SAR waveform bins (extended for baseline C)
    n_SAR_RW = 128              #-- SAR 1 Hz waveform bins
    n_LRM_RW = 128              #-- LRM waveform bins
    n_blocks = 20               #-- 20 Hz measurement blocks per record
    n_BeamBehaviourParams = 50  #-- number of beam behaviour parameter words
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- Time: day part
Location['Day'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Time: second part
Location['Second'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Time: microsecond part
Location['Micsec'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- USO correction factor
Location['USO_Corr'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Mode ID
Location['Mode_ID'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Source sequence counter
Location['SSC'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Instrument configuration
Location['Inst_config'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Record Counter
Location['Rec_Count'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
    #-- Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
    #-- CRF= CryoSat Reference Frame.
    Location['Real_beam'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
    #-- Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
Location['Baseline'] = np.zeros((n_records,n_blocks,3),dtype=np.int32)
#-- Star Tracker ID
Location['ST_ID'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.zeros((n_records,n_blocks),dtype=np.uint32)
Location['Spares'] = np.zeros((n_records,n_blocks,2),dtype=np.int16)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.zeros((n_records,n_blocks),dtype=np.int64)
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Noise Power measurement (dB/100)
Data_20Hz['Noise_power'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.zeros((n_records,n_blocks),dtype=np.int32)
Data_20Hz['Spares1'] = np.zeros((n_records,n_blocks,4),dtype=np.int8)
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = np.zeros((n_records),dtype=np.int32)
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = np.zeros((n_records),dtype=np.int32)
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = np.zeros((n_records),dtype=np.int32)
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = np.zeros((n_records),dtype=np.int32)
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = np.zeros((n_records),dtype=np.int32)
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare1'] = np.zeros((n_records,4),dtype=np.int8)
#-- Corrections Status Flag
Geometry['Corr_status'] = np.zeros((n_records),dtype=np.uint32)
#-- Correction Error Flag
Geometry['Corr_error'] = np.zeros((n_records),dtype=np.uint32)
Geometry['Spare2'] = np.zeros((n_records,4),dtype=np.int8)
#-- CryoSat-2 Average Waveforms Groups
Waveform_1Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
#-- Data Record Time (MDSR Time Stamp)
Waveform_1Hz['Day'] = np.zeros((n_records),dtype=np.int32)
Waveform_1Hz['Second'] = np.zeros((n_records),dtype=np.uint32)
Waveform_1Hz['Micsec'] = np.zeros((n_records),dtype=np.uint32)
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = np.zeros((n_records),dtype=np.int32)
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = np.zeros((n_records),dtype=np.int32)
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = np.zeros((n_records),dtype=np.int32)
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = np.zeros((n_records),dtype=np.int64)
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
Waveform_1Hz['Flags'] = np.zeros((n_records),dtype=np.uint16)
#-- CryoSat-2 Waveforms Groups
#-- Beam Behavior Parameters
Beam_Behavior = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
#-- Standard deviation as a function of boresight angle (microradians)
Beam_Behavior['SD_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Stack Center angle as a function of boresight angle (microradians)
Beam_Behavior['Center_boresight_angle'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-7),dtype=np.int16)
#-- CryoSat-2 mode specific waveform variables
Waveform_20Hz = {}
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [256]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [1024]
Waveform_20Hz['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.uint16)
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
Waveform_20Hz['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
#-- Beam behaviour parameters
Waveform_20Hz['Beam'] = Beam_Behavior
#-- Coherence [1024]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int16)
#-- Phase Difference [1024]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_BC_RW),dtype=np.int32)
#-- for each record in the CryoSat file
for r in range(n_records):
#-- CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
Location['Day'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Location['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
Location['ST_ID'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Location['Roll'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Pitch'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['Yaw'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Location['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
Location['Spares'][r,b,:] = np.fromfile(fid,dtype='>i2',count=2)
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
for b in range(n_blocks):
Data_20Hz['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
Data_20Hz['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Data_20Hz['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 External Corrections Group
Geometry['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
Geometry['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
Geometry['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
Geometry['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
#-- CryoSat-2 Average Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SIN'):
#-- SARIN Mode
Waveform_1Hz['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
Waveform_1Hz['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
Waveform_1Hz['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_RW)
Waveform_1Hz['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_1Hz['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_1Hz['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
#-- CryoSat-2 Waveforms Groups
if (MODE == 'LRM'):
#-- Low-Resolution Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
elif (MODE == 'SAR'):
#-- SAR Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
elif (MODE == 'SIN'):
#-- SARIN Mode
for b in range(n_blocks):
Waveform_20Hz['Waveform'][r,b,:] = np.fromfile(fid,dtype='>u2',count=n_SARIN_BC_RW)
Waveform_20Hz['Linear_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['Power2_Wfm_Multiplier'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
Waveform_20Hz['N_avg_echoes'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Flags'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['SD'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Amplitude'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Skewness'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Kurtosis'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['SD_boresight_angle'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
Waveform_20Hz['Beam']['Center_boresight_angle'][r,b] = np.fromfile(fid,dtype='>i2',count=1)
Waveform_20Hz['Beam']['Spare'][r,b,:] = np.fromfile(fid,dtype='>i2',count=(n_BeamBehaviourParams-7))
Waveform_20Hz['Coherence'][r,b,:] = np.fromfile(fid,dtype='>i2',count=n_SARIN_BC_RW)
Waveform_20Hz['Phase_diff'][r,b,:] = np.fromfile(fid,dtype='>i4',count=n_SARIN_BC_RW)
#-- Bind all the bits of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
CS_l1b_mds['Location'] = Location
CS_l1b_mds['Data'] = Data_20Hz
CS_l1b_mds['Geometry'] = Geometry
CS_l1b_mds['Waveform_1Hz'] = Waveform_1Hz
CS_l1b_mds['Waveform_20Hz'] = Waveform_20Hz
#-- return the output dictionary
return CS_l1b_mds
#-- PURPOSE: Initiate L1b MDS variables for CryoSat Baseline D (netCDF4)
def cryosat_baseline_D(full_filename, MODE, UNPACK=False):
#-- open netCDF4 file for reading
fid = netCDF4.Dataset(os.path.expanduser(full_filename),'r')
#-- use original unscaled units unless UNPACK=True
fid.set_auto_scale(UNPACK)
#-- get dimensions
ind_first_meas_20hz_01 = fid.variables['ind_first_meas_20hz_01'][:].copy()
ind_meas_1hz_20_ku = fid.variables['ind_meas_1hz_20_ku'][:].copy()
n_records = len(ind_first_meas_20hz_01)
n_SARIN_D_RW = 1024
n_SARIN_RW = 512
n_SAR_D_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
#-- CryoSat-2 Time and Orbit Group
Location = {}
#-- MDS Time
Location['Time'] = np.ma.zeros((n_records,n_blocks))
Location['Time'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
time_20_ku = fid.variables['time_20_ku'][:].copy()
#-- Time: day part
Location['Day'] = np.ma.zeros((n_records,n_blocks))
Location['Day'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- Time: second part
Location['Second'] = np.ma.zeros((n_records,n_blocks))
Location['Second'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- Time: microsecond part
Location['Micsec'] = np.ma.zeros((n_records,n_blocks))
Location['Micsec'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
#-- USO correction factor
Location['USO_Corr'] = np.ma.zeros((n_records,n_blocks))
Location['USO_Corr'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
uso_cor_20_ku = fid.variables['uso_cor_20_ku'][:].copy()
#-- Mode ID
Location['Mode_ID'] = np.ma.zeros((n_records,n_blocks))
Location['Mode_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
	flag_instr_mode_op_20_ku = fid.variables['flag_instr_mode_op_20_ku'][:].copy()
#-- Mode Flags
Location['Mode_flags'] = np.ma.zeros((n_records,n_blocks))
Location['Mode_flags'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
	flag_instr_mode_flags_20_ku = fid.variables['flag_instr_mode_flags_20_ku'][:].copy()
#-- Platform attitude control mode
Location['Att_control'] = np.ma.zeros((n_records,n_blocks))
Location['Att_control'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
	flag_instr_mode_att_ctrl_20_ku = fid.variables['flag_instr_mode_att_ctrl_20_ku'][:].copy()
#-- Instrument configuration
Location['Inst_config'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_config'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_flags_20_ku = fid.variables['flag_instr_conf_rx_flags_20_ku'][:].copy()
#-- acquisition band
Location['Inst_band'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_band'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_bwdt_20_ku = fid.variables['flag_instr_conf_rx_bwdt_20_ku'][:].copy()
#-- instrument channel
Location['Inst_channel'] = np.ma.zeros((n_records,n_blocks))
Location['Inst_channel'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_in_use_20_ku = fid.variables['flag_instr_conf_rx_in_use_20_ku'][:].copy()
#-- tracking mode
Location['Tracking_mode'] = np.ma.zeros((n_records,n_blocks))
Location['Tracking_mode'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_trk_mode_20_ku = fid.variables['flag_instr_conf_rx_trk_mode_20_ku'][:].copy()
#-- Source sequence counter
Location['SSC'] = np.ma.zeros((n_records,n_blocks))
Location['SSC'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
seq_count_20_ku = fid.variables['seq_count_20_ku'][:].copy()
#-- Record Counter
Location['Rec_Count'] = np.ma.zeros((n_records,n_blocks))
Location['Rec_Count'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
rec_count_20_ku = fid.variables['rec_count_20_ku'][:].copy()
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lat'] = np.ma.zeros((n_records,n_blocks))
Location['Lat'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lat_20_ku = fid.variables['lat_20_ku'][:].copy()
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Location['Lon'] = np.ma.zeros((n_records,n_blocks))
Location['Lon'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
lon_20_ku = fid.variables['lon_20_ku'][:].copy()
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Location['Alt'] = np.ma.zeros((n_records,n_blocks))
Location['Alt'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
alt_20_ku = fid.variables['alt_20_ku'][:].copy()
#-- Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
Location['Alt_rate'] = np.ma.zeros((n_records,n_blocks))
Location['Alt_rate'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
orb_alt_rate_20_ku = fid.variables['orb_alt_rate_20_ku'][:].copy()
#-- Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
#-- ITRF= International Terrestrial Reference Frame
Location['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3))
	Location['Sat_velocity'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
sat_vel_vec_20_ku = fid.variables['sat_vel_vec_20_ku'][:].copy()
#-- Real beam direction vector. In CRF: packed units (micro-m/s, 1e-6 m/s)
#-- CRF= CryoSat Reference Frame.
Location['Real_beam'] = np.ma.zeros((n_records,n_blocks,3))
	Location['Real_beam'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
beam_dir_vec_20_ku = fid.variables['beam_dir_vec_20_ku'][:].copy()
#-- Interferometric baseline vector. In CRF: packed units (micro-m/s, 1e-6 m/s)
Location['Baseline'] = np.ma.zeros((n_records,n_blocks,3))
	Location['Baseline'].mask = np.zeros((n_records,n_blocks,3),dtype=np.bool)
inter_base_vec_20_ku = fid.variables['inter_base_vec_20_ku'][:].copy()
#-- Star Tracker ID
Location['ST_ID'] = np.ma.zeros((n_records,n_blocks))
Location['ST_ID'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_instr_conf_rx_str_in_use_20_ku = fid.variables['flag_instr_conf_rx_str_in_use_20_ku'][:].copy()
#-- Antenna Bench Roll Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Roll'] = np.ma.zeros((n_records,n_blocks))
Location['Roll'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_roll_angle_str_20_ku = fid.variables['off_nadir_roll_angle_str_20_ku'][:].copy()
#-- Antenna Bench Pitch Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Pitch'] = np.ma.zeros((n_records,n_blocks))
Location['Pitch'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_pitch_angle_str_20_ku = fid.variables['off_nadir_pitch_angle_str_20_ku'][:].copy()
#-- Antenna Bench Yaw Angle (Derived from star trackers)
#-- packed units (0.1 micro-degree, 1e-7 degrees)
Location['Yaw'] = np.ma.zeros((n_records,n_blocks))
Location['Yaw'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
off_nadir_yaw_angle_str_20_ku = fid.variables['off_nadir_yaw_angle_str_20_ku'][:].copy()
#-- Measurement Confidence Data Flags
#-- Generally the MCD flags indicate problems when set
#-- If MCD is 0 then no problems or non-nominal conditions were detected
#-- Serious errors are indicated by setting bit 31
Location['MCD'] = np.ma.zeros((n_records,n_blocks))
Location['MCD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_mcd_20_ku = fid.variables['flag_mcd_20_ku'][:].copy()
#-- CryoSat-2 Measurement Group
#-- Derived from instrument measurement parameters
Data_20Hz = {}
#-- Window Delay reference (two-way) corrected for instrument delays
Data_20Hz['TD'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
window_del_20_ku = fid.variables['window_del_20_ku'][:].copy()
#-- H0 Initial Height Word from telemetry
Data_20Hz['H_0'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['H_0'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_applied_20_ku = fid.variables['h0_applied_20_ku'][:].copy()
#-- COR2 Height Rate: on-board tracker height rate over the radar cycle
Data_20Hz['COR2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['COR2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
cor2_applied_20_ku = fid.variables['cor2_applied_20_ku'][:].copy()
#-- Coarse Range Word (LAI) derived from telemetry
Data_20Hz['LAI'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['LAI'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_lai_word_20_ku = fid.variables['h0_lai_word_20_ku'][:].copy()
#-- Fine Range Word (FAI) derived from telemetry
Data_20Hz['FAI'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['FAI'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
h0_fai_word_20_ku = fid.variables['h0_fai_word_20_ku'][:].copy()
#-- Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
#-- Gain calibration corrections are applied (Sum of AGC stages 1 and 2
#-- plus the corresponding corrections) (dB/100)
Data_20Hz['AGC_CH1'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['AGC_CH1'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
agc_ch1_20_ku = fid.variables['agc_ch1_20_ku'][:].copy()
#-- Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
#-- Gain calibration corrections are applied (dB/100)
Data_20Hz['AGC_CH2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['AGC_CH2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
agc_ch2_20_ku = fid.variables['agc_ch2_20_ku'][:].copy()
#-- Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_gain_CH1'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
tot_gain_ch1_20_ku = fid.variables['tot_gain_ch1_20_ku'][:].copy()
#-- Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
Data_20Hz['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_gain_CH2'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
tot_gain_ch2_20_ku = fid.variables['tot_gain_ch2_20_ku'][:].copy()
#-- Transmit Power in microWatts
Data_20Hz['TX_Power'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TX_Power'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
transmit_pwr_20_ku = fid.variables['transmit_pwr_20_ku'][:].copy()
#-- Doppler range correction: Radial component (mm)
#-- computed for the component of satellite velocity in the nadir direction
Data_20Hz['Doppler_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_cor_20_ku = fid.variables['dop_cor_20_ku'][:].copy()
#-- Value of Doppler Angle for the first single look echo (1e-7 radians)
Data_20Hz['Doppler_angle_start'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_angle_start'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_angle_start_20_ku = fid.variables['dop_angle_start_20_ku'][:].copy()
#-- Value of Doppler Angle for the last single look echo (1e-7 radians)
Data_20Hz['Doppler_angle_stop'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Doppler_angle_stop'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
dop_angle_stop_20_ku = fid.variables['dop_angle_stop_20_ku'][:].copy()
#-- Instrument Range Correction: transmit-receive antenna (mm)
#-- Calibration correction to range on channel 1 computed from CAL1.
Data_20Hz['TR_inst_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_inst_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_range_tx_rx_20_ku = fid.variables['instr_cor_range_tx_rx_20_ku'][:].copy()
#-- Instrument Range Correction: receive-only antenna (mm)
#-- Calibration correction to range on channel 2 computed from CAL1.
Data_20Hz['R_inst_range'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['R_inst_range'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_range_rx_20_ku = fid.variables['instr_cor_range_rx_20_ku'][:].copy()
#-- Instrument Gain Correction: transmit-receive antenna (dB/100)
#-- Calibration correction to gain on channel 1 computed from CAL1
Data_20Hz['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['TR_inst_gain'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_gain_tx_rx_20_ku = fid.variables['instr_cor_gain_tx_rx_20_ku'][:].copy()
#-- Instrument Gain Correction: receive-only (dB/100)
#-- Calibration correction to gain on channel 2 computed from CAL1
Data_20Hz['R_inst_gain'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['R_inst_gain'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_cor_gain_rx_20_ku = fid.variables['instr_cor_gain_rx_20_ku'][:].copy()
#-- Internal Phase Correction (microradians)
Data_20Hz['Internal_phase'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Internal_phase'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_int_ph_cor_20_ku = fid.variables['instr_int_ph_cor_20_ku'][:].copy()
#-- External Phase Correction (microradians)
Data_20Hz['External_phase'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['External_phase'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
instr_ext_ph_cor_20_ku = fid.variables['instr_ext_ph_cor_20_ku'][:].copy()
#-- Noise Power measurement (dB/100)
Data_20Hz['Noise_power'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Noise_power'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
noise_power_20_ku = fid.variables['noise_power_20_ku'][:].copy()
#-- Phase slope correction (microradians)
#-- Computed from the CAL-4 packets during the azimuth impulse response
#-- amplitude (SARIN only). Set from the latest available CAL-4 packet.
Data_20Hz['Phase_slope'] = np.ma.zeros((n_records,n_blocks))
Data_20Hz['Phase_slope'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
ph_slope_cor_20_ku = fid.variables['ph_slope_cor_20_ku'][:].copy()
#-- CryoSat-2 External Corrections Group
Geometry = {}
#-- Data Record Time (MDSR Time Stamp)
Geometry['Time'] = fid.variables['time_cor_01'][:].copy()
#-- Dry Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['dryTrop'] = fid.variables['mod_dry_tropo_cor_01'][:].copy()
#-- Wet Tropospheric Correction packed units (mm, 1e-3 m)
Geometry['wetTrop'] = fid.variables['mod_wet_tropo_cor_01'][:].copy()
#-- Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['InvBar'] = fid.variables['inv_bar_cor_01'][:].copy()
#-- Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
Geometry['DAC'] = fid.variables['hf_fluct_total_cor_01'][:].copy()
#-- GIM Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_GIM'] = fid.variables['iono_cor_gim_01'][:].copy()
#-- Model Ionospheric Correction packed units (mm, 1e-3 m)
Geometry['Iono_model'] = fid.variables['iono_cor_01'][:].copy()
#-- Ocean tide Correction packed units (mm, 1e-3 m)
Geometry['ocTideElv'] = fid.variables['ocean_tide_01'][:].copy()
#-- Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
Geometry['lpeTideElv'] = fid.variables['ocean_tide_eq_01'][:].copy()
#-- Ocean loading tide Correction packed units (mm, 1e-3 m)
Geometry['olTideElv'] = fid.variables['load_tide_01'][:].copy()
#-- Solid Earth tide Correction packed units (mm, 1e-3 m)
Geometry['seTideElv'] = fid.variables['solid_earth_tide_01'][:].copy()
#-- Geocentric Polar tide Correction packed units (mm, 1e-3 m)
Geometry['gpTideElv'] = fid.variables['pole_tide_01'][:].copy()
#-- Surface Type: enumerated key to classify surface at nadir
#-- 0 = Open Ocean
#-- 1 = Closed Sea
#-- 2 = Continental Ice
#-- 3 = Land
Geometry['Surf_type'] = fid.variables['surf_type_01'][:].copy()
#-- Corrections Status Flag
Geometry['Corr_status'] = fid.variables['flag_cor_status_01'][:].copy()
#-- Correction Error Flag
Geometry['Corr_error'] = fid.variables['flag_cor_err_01'][:].copy()
#-- Same as the LRM/SAR groups but the waveform array is 512 bins instead of
#-- 128 and the number of echoes averaged is different.
Waveform_1Hz = {}
#-- Data Record Time (MDSR Time Stamp)
#-- Time (seconds since 2000-01-01)
time_avg_01_ku = fid.variables['time_avg_01_ku'][:].copy()
Waveform_1Hz['Time'] = time_avg_01_ku.copy()
#-- Time: day part
Waveform_1Hz['Day'] = np.array(time_avg_01_ku/86400.0, dtype=np.int32)
#-- Time: second part
Waveform_1Hz['Second'] = np.array(time_avg_01_ku -
Waveform_1Hz['Day'][:]*86400.0, dtype=np.uint32)
#-- Time: microsecond part
Waveform_1Hz['Micsec'] = np.array((time_avg_01_ku -
Waveform_1Hz['Day'][:]*86400.0 -
Waveform_1Hz['Second'][:])*1e6, dtype=np.uint32)
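	#-- e.g. time_avg_01_ku = 100000.5 s -> Day = 1, Second = 13600,
	#-- Micsec = 500000 (the fractional 0.5 s expressed in microseconds)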
#-- Lat: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lat'] = fid.variables['lat_avg_01_ku'][:].copy()
#-- Lon: packed units (0.1 micro-degree, 1e-7 degrees)
Waveform_1Hz['Lon'] = fid.variables['lon_avg_01_ku'][:].copy()
#-- Alt: packed units (mm, 1e-3 m)
#-- Altitude of COG above reference ellipsoid (interpolated value)
Waveform_1Hz['Alt'] = fid.variables['alt_avg_01_ku'][:].copy()
#-- Window Delay (two-way) corrected for instrument delays
Waveform_1Hz['TD'] = fid.variables['window_del_avg_01_ku'][:].copy()
#-- 1 Hz Averaged Power Echo Waveform
Waveform_1Hz['Waveform'] = fid.variables['pwr_waveform_avg_01_ku'][:].copy()
#-- Echo Scale Factor (to scale echo to watts)
Waveform_1Hz['Linear_Wfm_Multiplier'] = fid.variables['echo_scale_factor_avg_01_ku'][:].copy()
#-- Echo Scale Power (a power of 2 to scale echo to Watts)
Waveform_1Hz['Power2_Wfm_Multiplier'] = fid.variables['echo_scale_pwr_avg_01_ku'][:].copy()
#-- Number of echoes averaged
Waveform_1Hz['N_avg_echoes'] = fid.variables['echo_numval_avg_01_ku'][:].copy()
Waveform_1Hz['Flags'] = fid.variables['flag_echo_avg_01_ku'][:].copy()
#-- CryoSat-2 Waveforms Groups
Waveform_20Hz = {}
#-- Echo Scale Factor (to scale echo to watts)
Waveform_20Hz['Linear_Wfm_Multiplier'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Linear_Wfm_Multiplier'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
echo_scale_factor_20_ku = fid.variables['echo_scale_factor_20_ku'][:].copy()
#-- Echo Scale Power (a power of 2)
Waveform_20Hz['Power2_Wfm_Multiplier'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Power2_Wfm_Multiplier'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
echo_scale_pwr_20_ku = fid.variables['echo_scale_pwr_20_ku'][:].copy()
#-- Number of echoes averaged
Waveform_20Hz['N_avg_echoes'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['N_avg_echoes'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
echo_numval_20_ku = fid.variables['echo_numval_20_ku'][:].copy()
#-- Flags for errors or information about 20Hz waveform
Waveform_20Hz['Flags'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Flags'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
flag_echo_20_ku = fid.variables['flag_echo_20_ku'][:].copy()
#-- CryoSat-2 mode specific waveform variables
if (MODE == 'LRM'):
#-- Low-Resolution Mode
#-- Averaged Power Echo Waveform [128]
Waveform_20Hz['Waveform'] = np.ma.zeros((n_records,n_blocks,n_LRM_RW))
Waveform_20Hz['Waveform'].mask = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.bool)
pwr_waveform_20_ku = fid.variables['pwr_waveform_20_ku'][:].copy()
elif (MODE == 'SAR'):
#-- SAR Mode
#-- Averaged Power Echo Waveform [256]
Waveform_20Hz['Waveform'] = np.ma.zeros((n_records,n_blocks,n_SAR_D_RW))
Waveform_20Hz['Waveform'].mask = np.zeros((n_records,n_blocks,n_SAR_D_RW),dtype=np.bool)
pwr_waveform_20_ku = fid.variables['pwr_waveform_20_ku'][:].copy()
elif (MODE == 'SIN'):
#-- SARIN Mode
#-- Averaged Power Echo Waveform [1024]
Waveform_20Hz['Waveform'] = np.ma.zeros((n_records,n_blocks,n_SARIN_D_RW))
Waveform_20Hz['Waveform'].mask = np.zeros((n_records,n_blocks,n_SARIN_D_RW),dtype=np.bool)
pwr_waveform_20_ku = fid.variables['pwr_waveform_20_ku'][:].copy()
#-- Coherence [1024]: packed units (1/1000)
Waveform_20Hz['Coherence'] = np.ma.zeros((n_records,n_blocks,n_SARIN_D_RW))
Waveform_20Hz['Coherence'].mask = np.zeros((n_records,n_blocks,n_SARIN_D_RW),dtype=np.bool)
coherence_waveform_20_ku = fid.variables['coherence_waveform_20_ku'][:].copy()
#-- Phase Difference [1024]: packed units (microradians)
Waveform_20Hz['Phase_diff'] = np.ma.zeros((n_records,n_blocks,n_SARIN_D_RW))
Waveform_20Hz['Phase_diff'].mask = np.zeros((n_records,n_blocks,n_SARIN_D_RW),dtype=np.bool)
ph_diff_waveform_20_ku = fid.variables['ph_diff_waveform_20_ku'][:].copy()
#-- Beam Behavior Parameters
if MODE in ('SAR','SIN'):
Waveform_20Hz['Beam'] = {}
#-- Standard Deviation of Gaussian fit to range integrated stack power.
Waveform_20Hz['Beam']['SD'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['SD'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_std_20_ku = fid.variables['stack_std_20_ku'][:].copy()
#-- Stack Center: Mean of Gaussian fit to range integrated stack power.
Waveform_20Hz['Beam']['Center'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Center'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_centre_20_ku = fid.variables['stack_centre_20_ku'][:].copy()
#-- Stack amplitude parameter scaled in dB/100.
Waveform_20Hz['Beam']['Amplitude'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Amplitude'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_scaled_amplitude_20_ku = fid.variables['stack_scaled_amplitude_20_ku'][:].copy()
#-- 3rd moment: providing the degree of asymmetry of the range integrated
#-- stack power distribution.
Waveform_20Hz['Beam']['Skewness'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Skewness'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_skewness_20_ku = fid.variables['stack_skewness_20_ku'][:].copy()
#-- 4th moment: Measure of peakiness of range integrated stack power distribution.
Waveform_20Hz['Beam']['Kurtosis'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Kurtosis'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_kurtosis_20_ku = fid.variables['stack_kurtosis_20_ku'][:].copy()
#-- Stack peakiness computed from the range integrated power of the single look echoes
Waveform_20Hz['Beam']['Peakiness'] = np.ma.zeros((n_records,n_blocks))
Waveform_20Hz['Beam']['Peakiness'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
stack_peakiness_20_ku = fid.variables['stack_peakiness_20_ku'][:].copy()
#-- Stack residuals of Gaussian that fits the range integrated power of the single look echoes
		Waveform_20Hz['Beam']['RMS'] = np.ma.zeros((n_records,n_blocks))
		Waveform_20Hz['Beam']['RMS'].mask = np.zeros((n_records,n_blocks),dtype=np.bool)
import base64
import datetime
import io
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from xlrd.xldate import xldate_as_datetime
from yattag import Doc
plt.rcParams.update({"figure.autolayout": True})
import matplotlib.gridspec as gridspec
import pandas as pd
import scipy.stats
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import logging
"""
TF_CPP_MIN_LOG_LEVEL:
Defaults to 0, so all logs are shown. Set TF_CPP_MIN_LOG_LEVEL to 1 to filter out INFO logs, 2 to additionally filter out WARNING, 3 to additionally filter out ERROR.
Note: this variable must be set before tensorflow is imported to take effect.
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import tensorflow as tf
from tensorflow import keras
class NNetwork(object):
def __init__(self, network_count=200, epochs=1000):
logging.getLogger().setLevel(logging.INFO)
self.xl_dateformat = r"%Y-%m-%dT%H:%M"
self.model = None
self.pretrained_networks = []
self.software_version = "2.0.1"
self.input_filename = None
self.today = str(datetime.date.today())
self.avg_time_elapsed = 0
self.predictors_scaler = MinMaxScaler(feature_range=(-1, 1))
self.targets_scaler = MinMaxScaler(feature_range=(-1, 1))
self.history = None
self.file = None
self.skipped_rows = []
self.ruleset = []
self.layer1_neurons = 12
self.network_count = network_count
self.epochs = epochs
self.predictors = None
self.targets = None
self.predictions = None
self.avg_case_results_am = None
self.avg_case_results_pm = None
self.worst_case_results_am = None
self.worst_case_results_pm = None
self.WB_bandwidth = None
self.post_process_check = False # Is post-processed better than raw. If False, uses raw results, if true, uses post-processed results
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
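        # Default architecture: 5 predictors -> one hidden layer of
        # self.layer1_neurons (12) tanh units -> 1 linear output. The network
        # is rebuilt with the actual number of input columns in set_up_model()
        # once data have been imported.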
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(self.layer1_neurons, input_dim=5, activation="tanh")
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer, metrics=["mse"])
def import_data_from_csv(self, filename):
"""
Imports data to the network by a comma-separated values (CSV) file.
Load data to a network that are stored in .csv file format.
The data loaded from this method can be used both for training reasons as
well as to make predictions.
:param filename: String containing the filename of the .csv file containing the input data (e.g "input_data.csv")
"""
df = pd.read_csv(filename)
self.file = df.copy()
global FRC_IN
global FRC_OUT
global WATTEMP
global COND
# Locate the fields used as inputs/predictors and outputs in the loaded file
# and split them
if "se1_frc" in self.file.columns:
FRC_IN = "se1_frc"
WATTEMP = "se1_wattemp"
COND = "se1_cond"
FRC_OUT = "se4_frc"
elif "ts_frc1" in self.file.columns:
FRC_IN = "ts_frc1"
WATTEMP = "ts_wattemp"
COND = "ts_cond"
FRC_OUT = "hh_frc1"
elif "ts_frc" in self.file.columns:
FRC_IN = "ts_frc"
WATTEMP = "ts_wattemp"
COND = "ts_cond"
FRC_OUT = "hh_frc"
# Standardize the DataFrame by specifying rules
# To add a new rule, call the method execute_rule with the parameters (description, affected_column, query)
self.execute_rule("Invalid tapstand FRC", FRC_IN, self.file[FRC_IN].isnull())
self.execute_rule("Invalid household FRC", FRC_OUT, self.file[FRC_OUT].isnull())
self.execute_rule(
"Invalid tapstand date/time",
"ts_datetime",
self.valid_dates(self.file["ts_datetime"]),
)
self.execute_rule(
"Invalid household date/time",
"hh_datetime",
self.valid_dates(self.file["hh_datetime"]),
)
self.skipped_rows = df.loc[df.index.difference(self.file.index)]
self.file.reset_index(drop=True, inplace=True) # fix dropped indices in pandas
# Locate the rows of the missing data
drop_threshold = 0.90 * len(self.file.loc[:, [FRC_IN]])
nan_rows_watt = self.file.loc[self.file[WATTEMP].isnull()]
if len(nan_rows_watt) < drop_threshold:
self.execute_rule(
"Missing Water Temperature Measurement",
WATTEMP,
self.file[WATTEMP].isnull(),
)
nan_rows_cond = self.file.loc[self.file[COND].isnull()]
if len(nan_rows_cond) < drop_threshold:
self.execute_rule("Missing EC Measurement", COND, self.file[COND].isnull())
self.skipped_rows = df.loc[df.index.difference(self.file.index)]
self.file.reset_index(drop=True, inplace=True)
start_date = self.file["ts_datetime"]
end_date = self.file["hh_datetime"]
durations = []
all_dates = []
collection_time = []
for i in range(len(start_date)):
try:
# excel type
start = float(start_date[i])
end = float(end_date[i])
start = xldate_as_datetime(start, datemode=0)
if start.hour > 12:
collection_time = np.append(collection_time, 1)
else:
collection_time = np.append(collection_time, 0)
end = xldate_as_datetime(end, datemode=0)
except ValueError:
# kobo type
start = start_date[i][:16].replace("/", "-")
end = end_date[i][:16].replace("/", "-")
start = datetime.datetime.strptime(start, self.xl_dateformat)
if start.hour > 12:
collection_time = np.append(collection_time, 1)
else:
collection_time = np.append(collection_time, 0)
end = datetime.datetime.strptime(end, self.xl_dateformat)
durations.append((end - start).total_seconds())
all_dates.append(datetime.datetime.strftime(start, self.xl_dateformat))
self.durations = durations
self.time_of_collection = collection_time
self.avg_time_elapsed = np.mean(durations)
# Extract the column of dates for all data and put them in YYYY-MM-DD format
self.file["formatted_date"] = all_dates
predictors = {
FRC_IN: self.file[FRC_IN],
"elapsed time": (np.array(self.durations) / 3600),
"time of collection (0=AM, 1=PM)": self.time_of_collection,
}
self.targets = self.file.loc[:, FRC_OUT]
self.var_names = [
"Tapstand FRC (mg/L)",
"Elapsed Time",
"time of collection (0=AM, 1=PM)",
]
self.predictors = pd.DataFrame(predictors)
if len(nan_rows_watt) < drop_threshold:
self.predictors[WATTEMP] = self.file[WATTEMP]
self.var_names.append("Water Temperature(" + r"$\degree$" + "C)")
self.median_wattemp = np.median(self.file[WATTEMP].dropna().to_numpy())
self.upper95_wattemp = np.percentile(
self.file[WATTEMP].dropna().to_numpy(), 95
)
if len(nan_rows_cond) < drop_threshold:
self.predictors[COND] = self.file[COND]
self.var_names.append("EC (" + r"$\mu$" + "s/cm)")
self.median_cond = np.median(self.file[COND].dropna().to_numpy())
self.upper95_cond = np.percentile(self.file[COND].dropna().to_numpy(), 95)
self.targets = self.targets.values.reshape(-1, 1)
self.datainputs = self.predictors
self.dataoutputs = self.targets
self.input_filename = filename
def set_up_model(self):
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(
self.layer1_neurons,
input_dim=len(self.datainputs.columns),
activation="tanh",
)
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer)
def train_SWOT_network(self, directory):
"""Train the set of 200 neural networks on SWOT data
Trains an ensemble of 200 neural networks on se1_frc, water temperature,
water conductivity."""
if not os.path.exists(directory):
os.makedirs(directory)
self.predictors_scaler = self.predictors_scaler.fit(self.predictors)
self.targets_scaler = self.targets_scaler.fit(self.targets)
x = self.predictors
t = self.targets
self.calibration_predictions = []
self.trained_models = {}
for i in range(self.network_count):
logging.info('Training Network ' + str(i))
model_out = self.train_network(x, t, directory)
self.trained_models.update({'model_' + str(i): model_out})
def train_network(self, x, t, directory):
"""
Trains a single Neural Network on imported data.
This method trains Neural Network on data that have previously been imported
to the network using the import_data_from_csv() method.
The network used is a Multilayer Perceptron (MLP). Input and Output data are
normalized using MinMax Normalization.
        The input dataset is split into training and validation subsets; with
        train_size=0.333 below, one third of the samples are used for training
        and the remaining two thirds are held out for validation.
The training history is stored in a variable called self.history (see keras documentation:
keras.model.history object)
Performance metrics are calculated and stored for evaluating the network performance.
"""
tf.keras.backend.clear_session()
early_stopping_monitor = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
restore_best_weights=True)
x_norm = self.predictors_scaler.transform(x)
t_norm = self.targets_scaler.transform(t)
trained_model = keras.models.clone_model(self.model)
x_norm_train, x_norm_val, t_norm_train, t_norm_val = train_test_split(x_norm, t_norm, train_size=0.333,
shuffle=True)
new_weights = [np.random.uniform(-0.05, 0.05, w.shape) for w in trained_model.get_weights()]
trained_model.set_weights(new_weights)
trained_model.compile(loss='mse', optimizer=self.optimizer)
trained_model.fit(x_norm_train, t_norm_train, epochs=self.epochs, validation_data=(x_norm_val, t_norm_val),
callbacks=[early_stopping_monitor], verbose=0, batch_size=len(t_norm_train))
self.calibration_predictions.append(self.targets_scaler.inverse_transform(trained_model.predict(x_norm)))
return trained_model
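    # Illustrative usage sketch ("swot_data.csv" and the model directory are
    # hypothetical placeholders; set_up_model() rebuilds the network to match
    # the imported predictor columns):
    #   net = NNetwork(network_count=10, epochs=100)
    #   net.import_data_from_csv("swot_data.csv")
    #   net.set_up_model()
    #   net.train_SWOT_network("trained_models")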
def calibration_performance_evaluation(self, filename):
Y_true = np.array(self.targets)
Y_pred = np.array(self.calibration_predictions)
FRC_X = self.datainputs[FRC_IN].to_numpy()
capture_all = (
np.less_equal(Y_true, np.max(Y_pred, axis=0))
* np.greater_equal(Y_true, np.min(Y_pred, axis=0))
* 1
)
capture_90 = (
np.less_equal(Y_true, np.percentile(Y_pred, 95, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 5, axis=0))
* 1
)
capture_80 = (
np.less_equal(Y_true, np.percentile(Y_pred, 90, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 10, axis=0))
* 1
)
capture_70 = (
np.less_equal(Y_true, np.percentile(Y_pred, 85, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 15, axis=0))
* 1
)
capture_60 = (
np.less_equal(Y_true, np.percentile(Y_pred, 80, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 20, axis=0))
* 1
)
capture_50 = (
np.less_equal(Y_true, np.percentile(Y_pred, 75, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 25, axis=0))
* 1
)
capture_40 = (
np.less_equal(Y_true, np.percentile(Y_pred, 70, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 30, axis=0))
* 1
)
capture_30 = (
np.less_equal(Y_true, np.percentile(Y_pred, 65, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 35, axis=0))
* 1
)
capture_20 = (
np.less_equal(Y_true, np.percentile(Y_pred, 60, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 40, axis=0))
* 1
)
capture_10 = (
np.less_equal(Y_true, np.percentile(Y_pred, 55, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 45, axis=0))
* 1
)
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
length_20 = np.sum(np.less(Y_true, 0.2))
test_len = len(Y_true)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture = [
capture_10_sum / test_len,
capture_20_sum / test_len,
capture_30_sum / test_len,
capture_40_sum / test_len,
capture_50_sum / test_len,
capture_60_sum / test_len,
capture_70_sum / test_len,
capture_80_sum / test_len,
capture_90_sum / test_len,
capture_all_sum / test_len,
]
capture_20 = [
capture_10_20_sum / length_20,
capture_20_20_sum / length_20,
capture_30_20_sum / length_20,
capture_40_20_sum / length_20,
capture_50_20_sum / length_20,
capture_60_20_sum / length_20,
capture_70_20_sum / length_20,
capture_80_20_sum / length_20,
capture_90_20_sum / length_20,
capture_all_20_sum / length_20,
]
self.percent_capture_cal = capture_all_sum / test_len
self.percent_capture_02_cal = capture_all_20_sum / length_20
self.CI_reliability_cal = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
self.CI_reliability_02_cal = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
# Rank Histogram
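        # Rank of each observation within its sorted ensemble (with random
        # tie-breaking). A flat rank histogram indicates a statistically
        # reliable ensemble; delta below measures the squared deviation from
        # flatness, normalized by the expectation delta_0 for a random ensemble.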
rank = []
for a in range(0, len(Y_true)):
n_lower = np.sum(np.greater(Y_true[a], Y_pred[:, a]))
n_equal = np.sum(np.equal(Y_true[a], Y_pred[:, a]))
            deviate_rank = np.random.randint(0, n_equal + 1)  # random tie-breaking (replaces deprecated np.random.random_integers)
rank = np.append(rank, n_lower + deviate_rank)
rank_hist = np.histogram(rank, bins=self.network_count + 1)
delta = np.sum((rank_hist[0] - (test_len / ((self.network_count + 1)))) ** 2)
delta_0 = self.network_count * test_len / (self.network_count + 1)
self.delta_score_cal = delta / delta_0
c = self.network_count
alpha = np.zeros((test_len, (c + 1)))
beta = np.zeros((test_len, (c + 1)))
low_outlier = 0
high_outlier = 0
for a in range(0, test_len):
observation = Y_true[a]
            forecast = np.sort(Y_pred[:, a], axis=None)  # flatten the (ensemble, 1) slice so the sort runs across ensemble members
for b in range(1, c):
if observation > forecast[b]:
alpha[a, b] = forecast[b] - forecast[b - 1]
beta[a, b] = 0
elif forecast[b] > observation > forecast[b - 1]:
alpha[a, b] = observation - forecast[b - 1]
beta[a, b] = forecast[b] - observation
else:
alpha[a, b] = 0
beta[a, b] = forecast[b] - forecast[b - 1]
# overwrite boundaries in case of outliers
if observation < forecast[0]:
beta[a, 0] = forecast[0] - observation
low_outlier += 1
if observation > forecast[c - 1]:
alpha[a, c] = observation - forecast[c - 1]
high_outlier += 1
alpha_bar = np.mean(alpha, axis=0)
beta_bar = np.mean(beta, axis=0)
g_bar = alpha_bar + beta_bar
o_bar = beta_bar / (alpha_bar + beta_bar)
if low_outlier > 0:
o_bar[0] = low_outlier / test_len
g_bar[0] = beta_bar[0] / o_bar[0]
else:
o_bar[0] = 0
g_bar[0] = 0
if high_outlier > 0:
o_bar[c] = high_outlier / test_len
g_bar[c] = alpha_bar[c] / o_bar[c]
else:
o_bar[c] = 0
g_bar[c] = 0
p_i = np.arange(0 / c, (c + 1) / c, 1 / c)
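        # CRPS from the alpha/beta decomposition (cf. Hersbach, 2000):
        # CRPS = sum_i g_bar_i * [(1 - o_bar_i)*p_i**2 + o_bar_i*(1 - p_i)**2],
        # where g_bar_i is the mean bin width and o_bar_i the frequency with
        # which the observation falls below ensemble member i.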
self.CRPS_cal = np.sum(
g_bar * ((1 - o_bar) * (p_i**2) + o_bar * ((1 - p_i) ** 2))
)
CI_x = [0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]
fig = plt.figure(figsize=(15, 10), dpi=100)
gridspec.GridSpec(2, 3)
plt.subplot2grid((2, 3), (0, 0), colspan=2, rowspan=2)
plt.axhline(0.2, c="k", ls="--", label="Point-of-consumption FRC = 0.2 mg/L")
plt.scatter(
FRC_X, Y_true, edgecolors="k", facecolors="None", s=20, label="Observed"
)
plt.scatter(
FRC_X,
np.median(Y_pred, axis=0),
facecolors="r",
edgecolors="None",
s=10,
label="Forecast Median",
)
plt.vlines(
FRC_X,
np.min(Y_pred, axis=0),
np.max(Y_pred, axis=0),
color="r",
label="Forecast Range",
)
plt.xlabel("Point-of-Distribution FRC (mg/L)")
plt.ylabel("Point-of-Consumption FRC (mg/L)")
plt.xlim([0, np.max(FRC_X)])
plt.legend(
bbox_to_anchor=(0.001, 0.999),
shadow=False,
labelspacing=0.1,
fontsize="small",
handletextpad=0.1,
loc="upper left",
)
ax1 = fig.axes[0]
ax1.set_title("(a)", y=0.88, x=0.05)
plt.subplot2grid((2, 3), (0, 2), colspan=1, rowspan=1)
plt.plot(CI_x, CI_x, c="k")
plt.scatter(CI_x, capture, label="All observations")
plt.scatter(CI_x, capture_20, label="Point-of-Consumption FRC below 0.2 mg/L")
plt.xlabel("Ensemble Confidence Interval")
plt.ylabel("Percent Capture")
plt.ylim([0, 1])
plt.xlim([0, 1])
plt.legend(
bbox_to_anchor=(0.001, 0.999),
shadow=False,
labelspacing=0.1,
fontsize="small",
handletextpad=0.1,
loc="upper left",
)
ax2 = fig.axes[1]
ax2.set_title("(b)", y=0.88, x=0.05)
plt.subplot2grid((2, 3), (1, 2), colspan=1, rowspan=1)
plt.hist(rank, bins=(self.network_count + 1), density=True)
plt.xlabel("Rank")
plt.ylabel("Probability")
ax3 = fig.axes[2]
ax3.set_title("(c)", y=0.88, x=0.05)
        plt.savefig(
            os.path.splitext(filename)[0] + "_Calibration_Diagnostic_Figs.png",
            format="png",
            bbox_inches="tight",
        )
        # Encode the same figure to base64 before closing it; calling
        # plt.savefig() after plt.close() would write an empty canvas.
        myStringIOBytes = io.BytesIO()
        plt.savefig(myStringIOBytes, format="png", bbox_inches="tight")
        myStringIOBytes.seek(0)
        my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
        plt.close()
        return my_base_64_pngData
def get_bw(self):
Y_true = np.array(self.targets)
Y_pred = np.array(self.calibration_predictions)[:, :, 0]
s2 = []
xt_yt = []
for a in range(0, len(Y_true)):
observation = Y_true[a]
forecast = np.sort(Y_pred[:, a])
s2 = np.append(s2, np.var(forecast))
xt_yt = np.append(xt_yt, (np.mean(forecast) - observation) ** 2)
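        # Kernel dressing bandwidth: squared error of the ensemble mean minus
        # (1 + 1/K) times the mean ensemble variance, with K the ensemble size.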
WB_bw = np.mean(xt_yt) - (1 + 1 / self.network_count) * np.mean(s2)
return WB_bw
def post_process_performance_eval(self, bandwidth):
Y_true = np.squeeze(np.array(self.targets))
Y_pred = np.array(self.calibration_predictions)[:, :, 0]
test_len = len(Y_true)
min_CI = []
max_CI = []
CI_90_Lower = []
CI_90_Upper = []
CI_80_Lower = []
CI_80_Upper = []
CI_70_Lower = []
CI_70_Upper = []
CI_60_Lower = []
CI_60_Upper = []
CI_50_Lower = []
CI_50_Upper = []
CI_40_Lower = []
CI_40_Upper = []
CI_30_Lower = []
CI_30_Upper = []
CI_20_Lower = []
CI_20_Upper = []
CI_10_Lower = []
CI_10_Upper = []
CI_median = []
CRPS = []
Kernel_Risk = []
evaluation_range = np.arange(-10, 10.001, 0.001)
# compute CRPS as well as the confidence intervals of each ensemble forecast
for a in range(0, test_len):
scipy_kde = scipy.stats.gaussian_kde(Y_pred[:, a], bw_method=bandwidth)
scipy_pdf = scipy_kde.evaluate(evaluation_range) * 0.001
scipy_cdf = np.cumsum(scipy_pdf)
min_CI = np.append(
min_CI, evaluation_range[np.max(np.where(scipy_cdf == 0)[0])]
)
max_CI = np.append(max_CI, evaluation_range[np.argmax(scipy_cdf)])
CI_90_Lower = np.append(
CI_90_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.05)))]
)
CI_90_Upper = np.append(
CI_90_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.95)))]
)
CI_80_Lower = np.append(
CI_80_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.1)))]
)
CI_80_Upper = np.append(
CI_80_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.9)))]
)
CI_70_Lower = np.append(
CI_70_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.15)))]
)
CI_70_Upper = np.append(
CI_70_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.85)))]
)
CI_60_Lower = np.append(
CI_60_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.2)))]
)
CI_60_Upper = np.append(
CI_60_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.8)))]
)
CI_50_Lower = np.append(
CI_50_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.25)))]
)
CI_50_Upper = np.append(
CI_50_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.75)))]
)
CI_40_Lower = np.append(
CI_40_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.3)))]
)
CI_40_Upper = np.append(
CI_40_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.7)))]
)
CI_30_Lower = np.append(
CI_30_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.35)))]
)
CI_30_Upper = np.append(
CI_30_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.65)))]
)
CI_20_Lower = np.append(
CI_20_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.4)))]
)
CI_20_Upper = np.append(
CI_20_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.6)))]
)
CI_10_Lower = np.append(
CI_10_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.45)))]
)
CI_10_Upper = np.append(
CI_10_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.55)))]
)
CI_median = np.append(
CI_median, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.50)))]
)
Kernel_Risk = np.append(Kernel_Risk, scipy_kde.integrate_box_1d(-10, 0.2))
Heaviside = (evaluation_range >= Y_true[a]).astype(int)
CRPS_dif = (scipy_cdf - Heaviside) ** 2
CRPS = np.append(CRPS, np.sum(CRPS_dif * 0.001))
mean_CRPS = np.mean(CRPS)
capture_all = (
np.less_equal(Y_true, max_CI) * np.greater_equal(Y_true, min_CI) * 1
)
capture_90 = (
np.less_equal(Y_true, CI_90_Upper)
* np.greater_equal(Y_true, CI_90_Lower)
* 1
)
capture_80 = (
np.less_equal(Y_true, CI_80_Upper)
* np.greater_equal(Y_true, CI_80_Lower)
* 1
)
capture_70 = (
np.less_equal(Y_true, CI_70_Upper)
* np.greater_equal(Y_true, CI_70_Lower)
* 1
)
capture_60 = (
np.less_equal(Y_true, CI_60_Upper)
* np.greater_equal(Y_true, CI_60_Lower)
* 1
)
capture_50 = (
np.less_equal(Y_true, CI_50_Upper)
* np.greater_equal(Y_true, CI_50_Lower)
* 1
)
capture_40 = (
np.less_equal(Y_true, CI_40_Upper)
* np.greater_equal(Y_true, CI_40_Lower)
* 1
)
capture_30 = (
np.less_equal(Y_true, CI_30_Upper)
* np.greater_equal(Y_true, CI_30_Lower)
* 1
)
capture_20 = (
np.less_equal(Y_true, CI_20_Upper)
* np.greater_equal(Y_true, CI_20_Lower)
* 1
)
capture_10 = (
np.less_equal(Y_true, CI_10_Upper)
* np.greater_equal(Y_true, CI_10_Lower)
* 1
)
length_20 = np.sum(np.less(Y_true, 0.2))
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
        capture_70_20_sum = np.sum(capture_70_20)
import numpy as np
import scipy.stats
import matplotlib.pyplot as plt
def testExampleFit():
'''
Create a nominal and a distorted distribution
'''
NumData = 100000
trueModel = scipy.stats.norm(loc=0, scale=0.9)
distortion = scipy.stats.norm(loc=1.0, scale=0.9)
simul = trueModel.rvs(size=NumData)
distorted = distortion.rvs(size=NumData)
data = simul + distorted
# variance scaling
mean_data = np.mean(data)
mean_simul = np.mean(simul)
estShift = mean_data - mean_simul
# up to here we correct just a shift
meanCorrectedSimul = simul + estShift
# Let's perform a simplified smearing
# shift everything to 0 mean
mean_shifted_simul = np.mean(meanCorrectedSimul)
zero_mean_simul = meanCorrectedSimul - mean_shifted_simul
# And then calculate the ratio of the data simul sigma
sigma_data = np.std(data)
sigma_simul = np.std(zero_mean_simul)
sigma_ratio = sigma_data/sigma_simul
# The final corrected one
corrected = zero_mean_simul * sigma_ratio + mean_shifted_simul
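    # Net affine correction: x -> (x + estShift - mu)*(sigma_data/sigma_simul) + mu,
    # with mu the mean of the shifted simulation, so the corrected sample
    # matches the first two moments of the pseudodata.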
# check that the 2 first moments are now close
print("original MC mean ", np.mean(simul),
" pseudodata mean ", np.mean(data),
" corrected mean", np.mean(corrected))
print("original MC sigma ", | np.std(simul) | numpy.std |
import numpy as np
from scipy.signal import stft
SOUND_SPEED = 340 # [m/s]
# Steering vectors
def compute_steering_vectors_single_frequency(array_geometry, frequency, theta_grid, phi_grid):
# wave number
k = 2*np.pi*frequency/SOUND_SPEED
n_mics = len(array_geometry[0])
theta_grid = theta_grid * np.pi/180 # [degree] to [radian]
phi_grid = phi_grid * np.pi/180 # [degree] to [radian]
u = np.sin(theta_grid.reshape(-1, 1)).dot(np.cos(phi_grid).reshape(1, -1))
v = np.sin(theta_grid.reshape(-1, 1)).dot(np.sin(phi_grid).reshape(1, -1))
w = np.tile(np.cos(theta_grid.reshape(-1, 1)), (1, phi_grid.shape[0]))
x = u.reshape(u.shape[0], u.shape[1], 1)*array_geometry[0].reshape(1, 1, n_mics)
y = v.reshape(v.shape[0], v.shape[1], 1)*array_geometry[1].reshape(1, 1, n_mics)
z = w.reshape(w.shape[0], w.shape[1], 1)*array_geometry[2].reshape(1, 1, n_mics)
return np.exp( -1j*k*(x + y + z))
def compute_steering_vectors(array_geometry, sampling_frequency, n_fft, theta_grid, phi_grid):
n_thetas = len(theta_grid)
n_phis = len(phi_grid)
n_mics = len(array_geometry[0])
steering_vectors = np.zeros((n_fft, n_thetas, n_phis, n_mics), dtype=np.complex64)
for i_fft in range(n_fft):
frequency = (i_fft / n_fft) * (sampling_frequency/2)
steering_vectors[i_fft] = compute_steering_vectors_single_frequency(array_geometry, frequency, theta_grid, phi_grid)
return steering_vectors
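# Usage sketch with illustrative values (geometry, sampling rate, and grids
# are assumptions, not taken from the original code):
def _steering_vector_demo():
    mic_x = np.array([0.00, 0.05, 0.10, 0.15])  # 4 mics along the x-axis [m]
    array_geometry = np.vstack([mic_x, np.zeros(4), np.zeros(4)])
    theta_grid = np.arange(0, 91, 15)   # elevation [deg]
    phi_grid = np.arange(0, 360, 30)    # azimuth [deg]
    sv = compute_steering_vectors(array_geometry, 16000, 256, theta_grid, phi_grid)
    return sv.shape  # (n_fft, n_thetas, n_phis, n_mics) = (256, 7, 12, 4)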
def compute_sinr_2(source_tf_multichannel, interference_tf_multichannel):
source_power = 0
interference_power = 0
n_fft_bins = source_tf_multichannel.shape[0]
for i_f in range(n_fft_bins):
        source_power += np.trace(source_tf_multichannel[i_f].dot(source_tf_multichannel[i_f].transpose().conjugate()))
        interference_power += np.trace(interference_tf_multichannel[i_f].dot(interference_tf_multichannel[i_f].transpose().conjugate()))
return 10*np.log10(np.abs(source_power/interference_power))
def compute_sinr(source_tf_multichannel, interference_tf_multichannel, weights=None):
n_fft_bins, n_mics, _ = source_tf_multichannel.shape
source_power = 0
interference_power = 0
if weights is not None:
for i_f in range(n_fft_bins):
source_power += weights[i_f].reshape(n_mics, 1).transpose().conjugate().dot(
source_tf_multichannel[i_f].dot(
source_tf_multichannel[i_f].transpose().conjugate())).dot(
weights[i_f].reshape(n_mics, 1))
interference_power += weights[i_f].transpose().conjugate().dot(
interference_tf_multichannel[i_f].dot(
interference_tf_multichannel[i_f].transpose().conjugate())).dot(
weights[i_f])
else:
for i_f in range(n_fft_bins):
source_power += np.trace(source_tf_multichannel[i_f].dot(source_tf_multichannel[i_f].transpose().conjugate()))
interference_power += np.trace(interference_tf_multichannel[i_f].dot(interference_tf_multichannel[i_f].transpose().conjugate()))
return 10*np.log10(np.abs(source_power/interference_power))
def compute_mvdr_tf_beamformers(source_steering_vectors, tf_frames_multichannel, diagonal_loading_param=1):
n_fft_bins, n_mics = source_steering_vectors.shape
mvdr_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
for i_fft_bin in range(n_fft_bins):
n_frames = tf_frames_multichannel.shape[1]
R = 1./n_frames * tf_frames_multichannel[i_fft_bin].dot(tf_frames_multichannel[i_fft_bin].transpose().conjugate()) \
+ diagonal_loading_param*np.identity(n_mics, dtype=np.complex64)
invR = np.linalg.inv(R)
normalization_factor = source_steering_vectors[i_fft_bin, :].transpose().conjugate().dot(invR).dot(source_steering_vectors[i_fft_bin, :])
mvdr_tf_beamformers[i_fft_bin] = invR.dot(source_steering_vectors[i_fft_bin, :]) / (normalization_factor)
return mvdr_tf_beamformers
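# A minimal sketch of applying per-bin weights: the beamformed spectrum is
# y[f, t] = w[f]^H x[f, t]. Assumes tf_frames_multichannel has shape
# (n_fft_bins, n_mics, n_frames), matching the covariance estimate above.
def apply_tf_beamformers(weights, tf_frames_multichannel):
    n_fft_bins, n_mics = weights.shape
    n_frames = tf_frames_multichannel.shape[2]
    output = np.zeros((n_fft_bins, n_frames), dtype=np.complex64)
    for i_f in range(n_fft_bins):
        output[i_f] = weights[i_f].conjugate().dot(tf_frames_multichannel[i_f])
    return output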
def compute_mvndr_tf_beamformers(source_steering_vectors, tf_frames_multichannel, regularization_param=1):
    # Minimum variance near-distortionless response beamformer
# w = argmin w^H*R*w + \lambda * (v_s^H*w - 1)^2
n_fft_bins, n_mics = source_steering_vectors.shape
mvndr_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
for i_fft_bin in range(n_fft_bins):
# R = tf_frames_multichannel[i_fft_bin].dot(tf_frames_multichannel[i_fft_bin].transpose().conjugate()) + np.identity(n_mics)
# invR = np.linalg.inv(R)
# normalization_factor = source_steering_vectors[i_fft_bin, :].transpose().conjugate().dot(invR).dot(source_steering_vectors[i_fft_bin, :])
# regularization_param = 1/normalization_factor
R = tf_frames_multichannel[i_fft_bin].dot(tf_frames_multichannel[i_fft_bin].transpose().conjugate())\
+ np.identity(n_mics)\
+ regularization_param*source_steering_vectors[i_fft_bin, :]*source_steering_vectors[i_fft_bin, :].transpose().conjugate()
invR = np.linalg.inv(R)
mvndr_tf_beamformers[i_fft_bin] = regularization_param*invR.dot(source_steering_vectors[i_fft_bin, :])
return mvndr_tf_beamformers
def compute_lcmv_tf_beamformers(steering_vectors, tf_frames_multichannel, constraint_vector):
n_fft_bins, n_mics, n_steering_vectors = steering_vectors.shape
lcmv_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
for i_fft_bin in range(n_fft_bins):
n_samples = len(tf_frames_multichannel[i_fft_bin])
R = 1./n_samples * (tf_frames_multichannel[i_fft_bin].dot(
tf_frames_multichannel[i_fft_bin].transpose().conjugate()) \
+ np.identity(n_mics) )
invR = np.linalg.inv(R)
normalization_matrix = steering_vectors[i_fft_bin].transpose().conjugate().dot(
invR).dot(steering_vectors[i_fft_bin])
normalization_matrix = (1 - 1e-3)*normalization_matrix \
+ 1e-3*np.trace(normalization_matrix)/n_steering_vectors * 1*np.identity(n_steering_vectors)
inverse_normalization_matrix = np.linalg.inv(normalization_matrix)
lcmv_tf_beamformers[i_fft_bin] = invR.dot(steering_vectors[i_fft_bin]).dot(
inverse_normalization_matrix).dot(constraint_vector)
return lcmv_tf_beamformers
def compute_null_controlling_tf_beamformers(source_steering_vectors, null_steering_vectors, tf_frames_multichannel,
null_constraint_threshold, eigenvalue_percentage_threshold=0.99):
n_fft_bins, n_mics, n_null_steering_vectors = null_steering_vectors.shape
nc_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
for i_fft_bin in range(n_fft_bins):
null_steering_correlation_matrix = null_steering_vectors[i_fft_bin].dot(
null_steering_vectors[i_fft_bin].transpose().conjugate())
eigenvalues, eigenvectors = np.linalg.eigh(null_steering_correlation_matrix)
running_sums = np.cumsum(np.abs(eigenvalues[-1::-1]))
cutoff_index = np.searchsorted(running_sums,
eigenvalue_percentage_threshold * running_sums[-1])
eigenvectors = eigenvectors[:, len(eigenvalues)-cutoff_index-1:]
steering_vectors = np.hstack((source_steering_vectors[i_fft_bin].reshape(-1, 1), eigenvectors))
n_samples = len(tf_frames_multichannel[i_fft_bin])
R = 1./n_samples * (tf_frames_multichannel[i_fft_bin].dot(
tf_frames_multichannel[i_fft_bin].transpose().conjugate()) \
+ np.identity(n_mics) )
invR = np.linalg.inv(R)
normalization_matrix = steering_vectors.transpose().conjugate().dot(
invR).dot(steering_vectors)
""" Regularization for dealing with ill-conditionaed normalization matrix
Ref: <NAME>, <NAME>, "Source reconstruction of broadband EEG/MEG data using
the frequency-adaptive broadband (FAB) beamformer", bioRxiv
Equation (12) in https://www.biorxiv.org/content/biorxiv/early/2018/12/20/502690.full.pdf
"""
normalization_matrix = (1 - 1e-3)*normalization_matrix \
+ 1e-3*np.trace(normalization_matrix)/steering_vectors.shape[1] * 10*np.identity(steering_vectors.shape[1])
inverse_normalization_matrix = np.linalg.inv(normalization_matrix)
constraint_vector = null_constraint_threshold*np.ones(steering_vectors.shape[1])
constraint_vector[0] = 1
nc_tf_beamformers[i_fft_bin] = invR.dot(steering_vectors).dot(
inverse_normalization_matrix).dot(constraint_vector)
return nc_tf_beamformers
def compute_null_controlling_tf_beamformers_2(source_steering_vectors, null_steering_vectors, tf_sample_covariance_batch,
null_constraint_threshold, eigenvalue_percentage_threshold=0.99, diagonal_loading_param=1):
n_fft_bins, n_mics, n_null_steering_vectors = null_steering_vectors.shape
nc_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
for i_fft_bin in range(n_fft_bins):
null_steering_correlation_matrix = null_steering_vectors[i_fft_bin].dot(
null_steering_vectors[i_fft_bin].transpose().conjugate())
eigenvalues, eigenvectors = np.linalg.eigh(null_steering_correlation_matrix)
running_sums = np.cumsum(np.abs(eigenvalues[-1::-1]))
cutoff_index = np.searchsorted(running_sums,
eigenvalue_percentage_threshold * running_sums[-1])
eigenvectors = eigenvectors[:, len(eigenvalues)-cutoff_index-1:]
steering_vectors = np.hstack((source_steering_vectors[i_fft_bin].reshape(-1, 1), eigenvectors))
R = np.sum(tf_sample_covariance_batch[:, i_fft_bin, :, :], axis=0) / len(tf_sample_covariance_batch) + diagonal_loading_param*np.identity(n_mics)
invR = np.linalg.inv(R)
normalization_matrix = steering_vectors.transpose().conjugate().dot(
invR).dot(steering_vectors)
""" Regularization for dealing with ill-conditionaed normalization matrix
Ref: <NAME>, <NAME>, "Source reconstruction of broadband EEG/MEG data using
the frequency-adaptive broadband (FAB) beamformer", bioRxiv
Equation (12) in https://www.biorxiv.org/content/biorxiv/early/2018/12/20/502690.full.pdf
"""
normalization_matrix = (1 - 1e-3)*normalization_matrix \
+ 1e-3*np.trace(normalization_matrix)/steering_vectors.shape[1] * 10*np.identity(steering_vectors.shape[1])
inverse_normalization_matrix = np.linalg.inv(normalization_matrix)
constraint_vector = null_constraint_threshold*np.ones(steering_vectors.shape[1])
constraint_vector[0] = 1
nc_tf_beamformers[i_fft_bin] = invR.dot(steering_vectors).dot(
inverse_normalization_matrix).dot(constraint_vector)
return nc_tf_beamformers
def compute_null_controlling_minibatch_tf_beamformers(source_steering_vectors,
null_steering_vectors, tf_frames_multichannel_batch,
null_constraint_threshold, eigenvalue_percentage_threshold=0.99):
n_fft_bins, n_mics, n_null_steering_vectors = null_steering_vectors.shape
nc_tf_beamformers = np.zeros((n_fft_bins, n_mics), dtype=np.complex64)
for i_fft_bin in range(n_fft_bins):
null_steering_correlation_matrix = null_steering_vectors[i_fft_bin].dot(
null_steering_vectors[i_fft_bin].transpose().conjugate())
eigenvalues, eigenvectors = np.linalg.eigh(null_steering_correlation_matrix)
running_sums = np.cumsum(np.abs(eigenvalues[-1::-1]))
cutoff_index = np.searchsorted(running_sums,
eigenvalue_percentage_threshold * running_sums[-1])
eigenvectors = eigenvectors[:, len(eigenvalues)-cutoff_index-1:]
steering_vectors = np.hstack((source_steering_vectors[i_fft_bin].reshape(-1, 1), eigenvectors))
R = np.zeros((n_mics, n_mics), dtype=np.complex64)
for tf_frames_multichannel in tf_frames_multichannel_batch:
n_samples = len(tf_frames_multichannel[i_fft_bin])
R += 1./n_samples * (tf_frames_multichannel[i_fft_bin].dot(
tf_frames_multichannel[i_fft_bin].transpose().conjugate()))
R = R / len(tf_frames_multichannel_batch)
R += 20*np.identity(n_mics) # To prevent singularity of R
invR = np.linalg.inv(R)
normalization_matrix = steering_vectors.transpose().conjugate().dot(
invR).dot(steering_vectors)
""" Regularization for dealing with ill-conditionaed normalization matrix
Ref: <NAME>, <NAME>, "Source reconstruction of broadband EEG/MEG data using
the frequency-adaptive broadband (FAB) beamformer", bioRxiv
Equation (12) in https://www.biorxiv.org/content/biorxiv/early/2018/12/20/502690.full.pdf
"""
normalization_matrix = (1 - 1e-3)*normalization_matrix \
+ 1e-3*np.trace(normalization_matrix)/steering_vectors.shape[1] * 10*np.identity(steering_vectors.shape[1])
        inverse_normalization_matrix = np.linalg.inv(normalization_matrix)
        constraint_vector = null_constraint_threshold*np.ones(steering_vectors.shape[1])
        constraint_vector[0] = 1
        nc_tf_beamformers[i_fft_bin] = invR.dot(steering_vectors).dot(
            inverse_normalization_matrix).dot(constraint_vector)
    return nc_tf_beamformers
import os
import numpy as np
import pytest
from landlab import RasterModelGrid
XX = RasterModelGrid.BAD_INDEX
@pytest.fixture
def dans_grid1():
"""
Create a 5x5 test grid.
This is a sheet flow test.
"""
mg = RasterModelGrid((5, 5), xy_spacing=(10.0, 10.0))
this_dir = os.path.abspath(os.path.dirname(__file__))
infile = os.path.join(this_dir, "test_fr_input.txt")
z = mg.node_x.copy()
A_target = (
np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[3.0, 3.0, 2.0, 1.0, 0.0],
[3.0, 3.0, 2.0, 1.0, 0.0],
[3.0, 3.0, 2.0, 1.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
]
).flatten()
* 100.0
)
frcvr_target = np.array(
[
[0, 1, 2, 3, 4],
[5, 5, 6, 7, 9],
[10, 10, 11, 12, 14],
[15, 15, 16, 17, 19],
[20, 21, 22, 23, 24],
]
).flatten()
upids_target = np.array(
[
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24],
]
).flatten()
links2rcvr_target = np.full(25, XX)
    links2rcvr_target[mg.core_nodes] = np.array([9, 10, 11, 18, 19, 20, 27, 28, 29])
import numpy as np
import pandas as pd
from tqdm import tqdm
import numpy.ma as ma
from scipy.special import gammaln
from pykalman import KalmanFilter
from pynowcasting.pycsminwel import csminwel
class BVARGLP(object):
def __init__(self, data, lags, hz=8, vc=10e6, stationary_prior=None, crit=1e-16,
hyperpriors=True, mnpsi=True, mnalpha=False, sur=True, noc=True,
fcast=False, mcmc=False, ndraws=20000, ndrawsdiscard=None, mcmcconst=1,
mcmcfcast=True, mcmcstorecoef=True, verbose=False):
"""
        This class implements the Bayesian VAR from Giannone, Lenza and Primiceri (2012), hence the name GLP. The main
        idea of the model is to use multiple priors, each with their own hyperprior, in order to generate a shrinkage
        behaviour.
        This class only accepts data with a quarterly frequency and with no missing data.
@param hyperpriors: False = no priors on hyperparameters
True = reference priors on hyperparameters (default)
[NOTE: hyperpriors on psi calibrated for data expressed in
4 x logs, such as 4 x log(GDP). Thus if interest rate is in
percentage, divide by 100]
        @param vc: prior variance in the MN prior for the coefficients multiplying
                   the constant term (Default: vc=10e6)
@param stationary_prior: names of the variables that enter the VAR in first
differences and for which one might want to set the prior mean
on the coefficient on the first own lag in the MN prior and the
prior mean of the sum-of-coefficients prior to 0 (instead of
the typical 1)
@param mnpsi: False = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals NOT treated as
hyperparameters (set to the residual variance of an AR(1))
True = diagonal elements of the scale matrix of the IW prior on
the covariance of the residuals treated as hyperparameters (default)
@param mnalpha: False = Lag-decaying parameter of the MN prior set to 2 and
NOT treated as hyperparameter (default)
True = Lag-decaying parameter of the MN prior treated as
hyperparameter
@param sur: False = single-unit-root prior is OFF
True = single-unit-root prior is ON and its std is treated as an
hyperparameter (default)
@param noc: False = no-cointegration (sum-of coefficients) prior is OFF
True = no-cointegration (sum-of coefficients) is ON and its std is
treated as an hyperparameter (default)
@param fcast: False = does not generate forecasts at the posterior mode
True = generates forecasts at the posterior mode (default)
@param hz: number of quarters for which it generates forecasts (default: hz=8)
@param mcmc: False = does not run the MCMC (default)
True = runs the MCMC after the maximization
@param ndraws: number of draws in the MCMC (default: Ndraws=20000)
@param ndrawsdiscard: number of draws initially discarded to allow convergence
                              in the MCMC (default=Ndraws/2)
@param mcmcconst: scaling constant for the MCMC (should be calibrated to achieve
an acceptance rate of approx 25%) (default: MCMCconst=1)
@param mcmcfcast: False = does not generate forecasts when running the MCMC
True = generates forecasts while running the MCMC
(for each draw of the hyperparameters the code takes a
draw of the VAR coefficients and shocks, and generates
forecasts at horizons hz) (default).
@param mcmcstorecoef: False = does not store the MCMC draws of the VAR
coefficients and residual covariance matrix
True = stores the MCMC draws of the VAR coefficients and
residual covariance matrix (default)
@param verbose: Prints relevant information during the estimation.
@param crit: value for convergence criteria
"""
assert data.index.inferred_freq == 'Q', "input 'data' must be quarterly and recognized by pandas."
self.data = data
self.lags = lags
self.hyperpriors = hyperpriors
self.vc = vc
self.stationary_prior = stationary_prior
if stationary_prior is None:
self.pos = None
else:
self.pos = [self.data.columns.get_loc(var) for var in stationary_prior]
self.mnalpha = mnalpha
self.mnpsi = mnpsi
self.sur = sur
self.noc = noc
self.fcast = fcast
self.hz = hz
self.mcmc = mcmc
self.ndraws = ndraws
self.ndrwasdiscard = int(ndraws/2) if ndrawsdiscard is None else ndrawsdiscard
self.mcmccosnt = mcmcconst
self.mcmcfcast = mcmcfcast
self.mcmcstorecoef = mcmcstorecoef
self.verbose = verbose
self.crit = crit
self.TT = data.shape[0] # Time-series sample size without lags
self.n = data.shape[1] # Number of variables in the VAR
self.k = self.n * self.lags + 1 # Number of coefficients on each equation
self._set_priors()
self._regressor_matrix_ols()
self._minimization()
if self.fcast:
self._forecasts()
if self.mcmc:
self._mcmc()
def _set_priors(self):
# Sets up the default choices for the priors of the BVAR of Giannone, Lenza and Primiceri (2012)
if self.hyperpriors:
# hyperprior mode
mode_lambda = 0.2
mode_miu = 1
mode_theta = 1
# hyperprior sds
sd_lambda = 0.4
sd_miu = 1
sd_theta = 1
# scale and shape of the IG on psi/(d-n-1)
scalePSI = 0.02 ** 2
priorcoef = pd.DataFrame(index=['lambda', 'miu', 'theta', 'alpha', 'beta'],
columns=['r_k', 'r_theta', 'PSI'])
priorcoef.loc['lambda', 'r_k'], priorcoef.loc['lambda', 'r_theta'] = \
self._gamma_coef(mode_lambda, sd_lambda)
priorcoef.loc['miu', 'r_k'], priorcoef.loc['miu', 'r_theta'] = self._gamma_coef(mode_miu, sd_miu)
priorcoef.loc['theta', 'r_k'], priorcoef.loc['theta', 'r_theta'] = self._gamma_coef(mode_theta, sd_theta)
priorcoef.loc['alpha', 'PSI'] = scalePSI
priorcoef.loc['beta', 'PSI'] = scalePSI
self.priorcoef = priorcoef
else:
self.priorcoef = None
def _regressor_matrix_ols(self):
        # Constructs the matrix of regressors and the AR(1) residual variances (SS)
n = self.n
lags = self.lags
data = self.data
x = np.zeros((self.TT, self.k))
x[:, 0] = 1
for i in range(1, self.lags + 1):
x[:, 1 + (i - 1) * n: i * n + 1] = data.shift(i).values
self.y0 = data.iloc[:lags, :].mean().values
self.x = x[lags:, :]
self.y = data.values[lags:, :]
self.T = self.y.shape[0] # Sample size after lags
# OLS for AR(1) residual variance of each equation
SS = np.zeros(self.n)
for i in range(self.n):
y_reg = self.y[1:, i]
x_reg = np.hstack((np.ones((self.T - 1, 1)), self.y[:-1, i].reshape((-1, 1))))
ar1 = OLS1(y_reg, x_reg)
SS[i] = ar1.sig2hatols
self.SS = SS
def _minimization(self):
# Starting values for the minimization
self.lambda0 = 0.2 # std of MN prior
self.theta0 = 1 # std of SUR prior
self.miu0 = 1 # std NOC prior
self.alpha0 = 2 # lag-decaying parameter of the MN prior
self.psi0 = self.SS
# Bounds for the minimization step
self.lambda_min = 0.0001
self.lambda_max = 5
self.alpha_min = 0.1
self.alpha_max = 5
self.theta_min = 0.0001
self.theta_max = 50
self.miu_min = 0.0001
self.miu_max = 50
self.psi_min = self.SS / 100
self.psi_max = self.SS * 100
# Transforming inputs to unbounded and builds the initial guess
x0 = np.array([-np.log((self.lambda_max - self.lambda0) / (self.lambda0 - self.lambda_min))])
if self.mnpsi:
inpsi = -np.log((self.psi_max - self.psi0) / (self.psi0 - self.psi_min))
x0 = np.concatenate((x0, inpsi))
if self.sur:
intheta = np.array([-np.log((self.theta_max - self.theta0) / (self.theta0 - self.theta_min))])
x0 = np.concatenate((x0, intheta))
if self.noc:
inmiu = np.array([-np.log((self.miu_max - self.miu0) / (self.miu0 - self.miu_min))])
x0 = np.concatenate((x0, inmiu))
if self.mnalpha:
inalpha = np.array([-np.log((self.alpha_max - self.alpha0) / (self.alpha0 - self.alpha_min))])
x0 = np.concatenate((x0, inalpha))
# initial guess for the inverse Hessian
H0 = 10 * np.eye(len(x0))
# Minimization of the negative of the posterior of the hyperparameters
def myfun(xxx):
logML, _, _ = self._logmlvar_formin(xxx)
return -logML
# Optimization
fh, xh, gh, h, itct, fcount, retcodeh = csminwel(fcn=myfun,
x0=x0,
h0=H0,
grad=None,
crit=self.crit,
nit=1000,
verbose=self.verbose)
self.itct = itct
self.xh = xh
self.h = h
self.log_post, self.betahat, self.sigmahat = self._logmlvar_formin(xh)
self.lamb = self.lambda_min + (self.lambda_max - self.lambda_min) / (1 + np.exp(-xh[0]))
self.theta = self.theta_max
self.miu = self.miu_max
if self.mnpsi:
# diagonal elements of the scale matrix of the IW prior on the residual variance
self.psi = self.psi_min + (self.psi_max - self.psi_min) / (1 + np.exp(-xh[1:self.n + 1]))
if self.sur:
# std of sur prior at the peak
self.theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-xh[self.n + 1]))
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[self.n + 2]))
else: # self.sur == 0
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[self.n + 1]))
else: # self.mnpsi == 0
self.psi = self.SS
if self.sur:
# std of sur prior at the peak
self.theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-xh[1]))
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[2]))
else:
if self.noc:
# std of noc prior at the peak
self.miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-xh[1]))
if not self.mnalpha:
self.alpha = 2
else:
# Lag-decaying parameter of the MN prior
self.alpha = self.alpha_min + (self.alpha_max - self.alpha_min) / (1 + np.exp(-xh[-1]))
def _forecasts(self):
        # Forecasts at the posterior mode
Y = np.vstack([self.y, np.zeros((self.hz, self.n))])
for tau in range(self.hz):
indexes = list(range(self.T + tau - 1, self.T + tau - self.lags - 1, -1))
xT = np.vstack([1, Y[indexes].T.reshape((self.k - 1, 1), order="F")]).T
Y[self.T + tau, :] = xT @ self.betahat
self.forecast = Y[-self.hz:, :]
def _mcmc(self):
# Jacobian of the transformation of the hyperparameters that has been
# used for the constrained maximization
JJ = np.exp(self.xh) / ((1 + np.exp(self.xh)) ** 2)
JJ[0] = (self.lambda_max - self.lambda_min) * JJ[0]
if self.mnpsi:
JJ[1: self.n + 1] = (self.psi_max - self.psi_min) * JJ[1: self.n + 1]
if self.sur:
JJ[self.n + 1] = (self.theta_max - self.theta_min) * JJ[self.n + 1]
if self.noc:
JJ[self.n + 2] = (self.miu_max - self.miu_min) * JJ[self.n + 2]
else:
if self.noc:
JJ[self.n + 1] = (self.miu_max - self.miu_min) * JJ[self.n + 1]
else:
if self.sur:
JJ[1] = (self.theta_max - self.theta_min) * JJ[1]
if self.noc:
JJ[2] = (self.miu_max - self.miu_min) * JJ[2]
else:
if self.noc:
JJ[1] = (self.miu_max - self.miu_min) * JJ[1]
if self.mnalpha:
JJ[-1] = (self.alpha_max - self.alpha_min) * JJ[-1]
JJ = np.diag(JJ)
HH = JJ @ self.h @ JJ
# Regularization to assure that HH is positive-definite
eigval, eigvec = np.linalg.eig(HH)
HH = eigvec @ np.diag(np.abs(eigval)) @ eigvec.T
# recovering the posterior mode
postmode = np.array([self.lamb])
if self.mnpsi:
modepsi = np.array(self.psi)
postmode = np.concatenate((postmode, modepsi))
if self.sur:
modetheta = np.array([self.theta])
postmode = np.concatenate((postmode, modetheta))
if self.noc:
modemiu = np.array([self.miu])
postmode = np.concatenate((postmode, modemiu))
if self.mnalpha:
modealpha = np.array([self.alpha])
postmode = np.concatenate((postmode, modealpha))
# starting value of the Metropolis algorithm
P = np.zeros((self.ndraws, self.xh.shape[0]))
logMLold = -10e15
while logMLold == -10e15:
P[0, :] = np.random.multivariate_normal(mean=postmode,
cov=(self.mcmccosnt ** 2) * HH)
logMLold, betadrawold, sigmadrawold = self._logmlvar_formcmc(P[0])
# matrix to store the draws of the VAR coefficients if MCMCstorecoeff is on
if self.mcmcstorecoef:
mcmc_beta = np.zeros((self.k, self.n, self.ndraws - self.ndrwasdiscard))
mcmc_sigma = np.zeros((self.n, self.n, self.ndraws - self.ndrwasdiscard))
else:
mcmc_beta = None
mcmc_sigma = None
# matrix to store the forecasts if MCMCfcast is on
if self.mcmcfcast:
mcmc_Dforecast = np.zeros((self.hz, self.n, self.ndraws - self.ndrwasdiscard))
else:
mcmc_Dforecast = None
# Metropolis iterations
count = 0
for i in tqdm(range(1, self.ndraws), 'MCMC Iterations', disable=not self.verbose):
# draw candidate value
P[i, :] = np.random.multivariate_normal(mean=P[i - 1, :],
cov=(self.mcmccosnt ** 2) * HH)
logMLnew, betadrawnew, sigmadrawnew = self._logmlvar_formcmc(P[i, :])
if logMLnew > logMLold: # if there is an improvement, accept it
logMLold = logMLnew
count = count + 1
else: # If there is no improvement, there is a chance to accept the draw
                if np.random.rand() < np.exp(logMLnew - logMLold):  # If accepted
logMLold = logMLnew
count = count + 1
else: # If not accepted, overwrite the draw with the last value
P[i, :] = P[i - 1, :]
# if MCMCfcast is on, take a new draw of the VAR coefficients with
# the old hyperparameters if have rejected the new ones
if self.mcmcfcast or self.mcmcstorecoef:
_, betadrawnew, sigmadrawnew = self._logmlvar_formcmc(P[i, :])
# stores draws of VAR coefficients if MCMCstorecoeff is on
if (i >= self.ndrwasdiscard) and self.mcmcstorecoef:
mcmc_beta[:, :, i - self.ndrwasdiscard] = betadrawnew
mcmc_sigma[:, :, i - self.ndrwasdiscard] = sigmadrawnew
# produce and store the forecasts if MCMCfcast is on
if (i >= self.ndrwasdiscard) and self.mcmcfcast:
Y = np.vstack([self.y, np.zeros((self.hz, self.n))])
for tau in range(self.hz):
indexes = list(range(self.T + tau - 1, self.T + tau - self.lags - 1, -1))
xT = np.vstack([1, Y[indexes].T.reshape((self.k - 1, 1), order="F")]).T
Y[self.T + tau, :] = xT @ betadrawnew + np.random.multivariate_normal(mean=np.zeros(self.n),
cov=sigmadrawnew)
mcmc_Dforecast[:, :, i - self.ndrwasdiscard] = Y[-self.hz:, :]
# store the draws of the hyperparameters
        mcmc_lambda = P[self.ndrwasdiscard:, 0]  # Standard Minnesota prior
mcmc_psi = None
mcmc_theta = None
mcmc_miu = None
if self.mnpsi:
# diagonal elements of the scale matrix of the IW prior on the residual variance
            mcmc_psi = P[self.ndrwasdiscard:, 1:self.n + 1]
if self.sur:
# std of sur prior
mcmc_theta = P[self.ndrwasdiscard:, self.n + 1]
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, self.n + 2]
else: # self.sur == 0
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, self.n + 1]
else: # self.mnpsi == 0
if self.sur:
# std of sur prior
mcmc_theta = P[self.ndrwasdiscard:, 1]
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, 2]
else: # self.sur == 0
if self.noc:
# std of noc prior
mcmc_miu = P[self.ndrwasdiscard:, 1]
if self.mnalpha:
# Lag-decaying parameter of the MN prior
mcmc_alpha = P[self.ndrwasdiscard:, -1]
self.mcmc_alpha = mcmc_alpha
mcmc_accrate = np.mean((mcmc_lambda[1:] != mcmc_lambda[:-1]))
# Save the chains as attributes
self.mcmc_beta = mcmc_beta
self.mcmc_sigma = mcmc_sigma
self.mcmc_dforecast = mcmc_Dforecast
self.mcmc_lambda = mcmc_lambda
self.mcmc_psi = mcmc_psi
self.mcmc_theta = mcmc_theta
self.mcmc_miu = mcmc_miu
self.mcmc_accrate = mcmc_accrate
def _logmlvar_formin(self, par):
"""
This function computes the log-posterior (or the logML if hyperpriors=0),
the posterior mode of the coefficients and the covariance matrix of the
residuals of the BVAR of Giannone, Lenza and Primiceri (2012)
"""
# The following avoids the warning "referenced before assignment"
theta = None
miu = None
# hyperparameters
lambda_ = self.lambda_min + (self.lambda_max - self.lambda_min) / (1 + np.exp(-par[0]))
d = self.n + 2
if not self.mnpsi:
psi = self.SS * (d - self.n - 1)
if self.sur:
theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-par[1]))
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[2]))
else:
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[1]))
else:
psi = self.psi_min + (self.psi_max - self.psi_min) / (1 + np.exp(-par[1:self.n + 1]))
if self.sur:
theta = self.theta_min + (self.theta_max - self.theta_min) / (1 + np.exp(-par[self.n + 1]))
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[self.n + 2]))
else:
if self.noc:
miu = self.miu_min + (self.miu_max - self.miu_min) / (1 + np.exp(-par[self.n + 1]))
if not self.mnalpha:
alpha = 2
else: # self.mnalpha == 1
alpha = self.alpha_min + (self.alpha_max - self.alpha_min) / (1 + np.exp(-par[-1]))
# Setting up the priors
omega = np.zeros(self.k)
omega[0] = self.vc
for i in range(1, self.lags + 1):
omega[1 + (i - 1) * self.n: 1 + i * self.n] = \
(d - self.n - 1) * (lambda_ ** 2) * (1 / (i ** alpha)) / psi
# Prior scale matrix for the covariance of the shocks
PSI = np.diag(psi)
# dummy observations if sur and / or noc = 1
Td = 0
xdsur = np.array([]).reshape((0, self.k))
ydsur = np.array([]).reshape((0, self.n))
xdnoc = np.array([]).reshape((0, self.k))
ydnoc = np.array([]).reshape((0, self.n))
y = self.y.copy()
x = self.x.copy()
T = self.T
if self.sur:
xdsur = (1 / theta) * np.tile(self.y0, (1, self.lags))
xdsur = np.hstack((np.array([[1 / theta]]), xdsur))
ydsur = (1 / theta) * self.y0
y = np.vstack((y, ydsur))
x = np.vstack((x, xdsur))
Td = Td + 1
if self.noc:
ydnoc = (1 / miu) * np.diag(self.y0)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
ydnoc[self.pos, self.pos] = 0
xdnoc = (1 / miu) * np.tile(np.diag(self.y0), (1, self.lags))
xdnoc = np.hstack((np.zeros((self.n, 1)), xdnoc))
y = np.vstack((y, ydnoc))
x = np.vstack((x, xdnoc))
Td = Td + self.n
T = T + Td
# ===== OUTPUT ===== #
# Minnesota prior mean
b = np.zeros((self.k, self.n))
diagb = np.ones(self.n)
# Set to zero the prior mean on the first own lag for variables selected in the vector pos
if self.pos is not None:
diagb[self.pos] = 0
b[1:self.n + 1, :] = np.diag(diagb)
# posterior mode of the VAR coefficients
matA = x.T @ x + np.diag(1 / omega)
matB = x.T @ y + np.diag(1 / omega) @ b
        betahat = np.linalg.solve(matA, matB)  # np.linalg.solve is more efficient than inverting a large matrix
# VAR residuals
epshat = y - x @ betahat
# Posterior mode of the covariance matrix
sigmahat = (epshat.T @ epshat + PSI + (betahat - b).T @ np.diag(1 / omega) @ (betahat - b))
sigmahat = sigmahat / (T + d + self.n + 1)
# logML
aaa = np.diag(np.sqrt(omega)) @ x.T @ x @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshat.T @ epshat + (betahat - b).T @ np.diag(1/omega) @
(betahat-b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
logML = - self.n * T * np.log(np.pi) / 2
logML = logML + sum(gammaln((T + d - np.arange(self.n)) / 2) - gammaln((d - np.arange(self.n)) / 2))
logML = logML - T * sum(np.log(psi)) / 2
logML = logML - self.n * sum(np.log(eigaaa)) / 2
logML = logML - (T + d) * sum(np.log(eigbbb)) / 2
if self.sur or self.noc:
yd = np.vstack((ydsur, ydnoc))
xd = np.vstack((xdsur, xdnoc))
# prior mode of the VAR coefficients
betahatd = b
# VAR residuals at the prior mode
epshatd = yd - xd @ betahatd
aaa = np.diag(np.sqrt(omega)) @ xd.T @ xd @ np.diag(np.sqrt(omega))
bbb = np.diag(1 / np.sqrt(psi)) @ (epshatd.T @ epshatd + (betahatd - b).T @ np.diag(1 / omega) @
(betahatd - b)) @ np.diag(1 / np.sqrt(psi))
eigaaa = np.linalg.eig(aaa)[0].real
eigaaa[eigaaa < 1e-12] = 0
eigaaa = eigaaa + 1
eigbbb = np.linalg.eig(bbb)[0].real
eigbbb[eigbbb < 1e-12] = 0
eigbbb = eigbbb + 1
# normalizing constant
            norm = - self.n * Td * np.log(np.pi) / 2
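# Usage sketch (hedged: the class is truncated at this point in the excerpt,
# and the index below is only an illustration of a quarterly pandas index):
# idx = pd.date_range('2000-03-31', periods=80, freq='Q')
# df = pd.DataFrame(np.random.randn(80, 3).cumsum(axis=0),
#                   index=idx, columns=['gdp', 'infl', 'rate'])
# bvar = BVARGLP(df, lags=2, fcast=True)
# print(bvar.forecast)  # hz-step-ahead forecasts at the posterior mode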
import numpy as np
import os
USE_CONV2D_MATMUL = True
USE_NATIVE = bool(os.getenv('REVDIFF_NATIVE'))
if USE_NATIVE:
import obnum
def my_expand_dims3(x, size):
y = np.empty((x.shape[0], size, x.shape[1]), dtype=np.float32)
for i in range(x.shape[0]):
for j in range(size):
for k in range(x.shape[1]):
y[i, j, k] = x[i, k]
return y
def my_conv2d_naive_val(X, K, n, f, y, x):
res = 0
for c in range(K.shape[1]):
for i in range(K.shape[2]):
for j in range(K.shape[3]):
res += X[n, c, y+i, x+j] * K[f, c, i, j]
return res
def my_conv2d_naive(X, K, sh, sw):
if USE_CONV2D_MATMUL:
return my_conv2d_matmul(X, K, sh, sw)
h_y = int((X.shape[2] - K.shape[2]) / sh + 1)
w_y = int((X.shape[3] - K.shape[3]) / sw + 1)
Y = np.empty((X.shape[0], K.shape[0], h_y, w_y), dtype=np.float32)
for n in range(X.shape[0]):
for f in range(K.shape[0]):
for y in range(h_y):
for x in range(w_y):
Y[n, f, y, x] = my_conv2d_naive_val(X, K, n, f, y*sh, x*sw)
return Y
def x2mat(X, K, sh, sw):
hy = int((X.shape[2] - K.shape[2]) / sh + 1)
wy = int((X.shape[3] - K.shape[3]) / sw + 1)
mx = np.empty((X.shape[0], hy, wy,
X.shape[1] * K.shape[2] * K.shape[3]), dtype=np.float32)
for n in range(X.shape[0]):
for i in range(hy):
for j in range(wy):
v = X[n, :, i*sh:i*sh+K.shape[2], j*sw:j*sw+K.shape[3]]
mx[n, i, j] = v.reshape(-1)
return mx.reshape(-1, mx.shape[3])
def my_conv2d_matmul(X, K, sh, sw):
hy = int((X.shape[2] - K.shape[2]) / sh + 1)
wy = int((X.shape[3] - K.shape[3]) / sw + 1)
mX = x2mat(X, K, sh, sw)
mK = K.reshape(K.shape[0], -1)
mY = mK @ mX.T
Y = mY.reshape(K.shape[0], X.shape[0], hy, wy)
Y = np.transpose(Y, (1, 0, 2, 3))
return Y
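# Equivalence sanity check sketch: the im2col/matmul path should reproduce the
# naive quadruple loop (the flag toggle forces the loop version):
def _conv2d_equivalence_check():
    global USE_CONV2D_MATMUL
    rng = np.random.RandomState(0)
    X = rng.randn(2, 3, 8, 8).astype(np.float32)
    K = rng.randn(4, 3, 3, 3).astype(np.float32)
    fast = my_conv2d_matmul(X, K, 1, 1)
    USE_CONV2D_MATMUL = False
    slow = my_conv2d_naive(X, K, 1, 1)  # now takes the loop path
    USE_CONV2D_MATMUL = True
    assert np.allclose(fast, slow, atol=1e-4)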
def my_pad_tensor(X, ptop, pbot, pleft, pright):
Y = np.zeros((X.shape[0], X.shape[1], X.shape[2] + ptop + pbot,
X.shape[3] + pleft + pright)).astype(np.float32)
Y[:, :, ptop:ptop+X.shape[2], pleft:pleft+X.shape[3]] = X
return Y
def my_conv2d_padded_naive(X, K, sh, sw, ph, pw):
return my_conv2d_naive(my_pad_tensor(X, ph, ph, pw, pw), K, sh, sw)
def my_stride0(X, h, w):
Y = np.zeros((X.shape[0], X.shape[1],
1 + (h + 1) * (X.shape[2] - 1),
1 + (w + 1) * (X.shape[3] - 1))).astype(np.float32)
for i1 in range(X.shape[0]):
for i2 in range(X.shape[1]):
for i3 in range(X.shape[2]):
for i4 in range(X.shape[3]):
Y[i1, i2, i3 * (h+1), i4 * (w+1)] = X[i1, i2, i3, i4]
return Y
def my_rot180(X):
Y = np.empty(X.shape, dtype=np.float32)
for i1 in range(X.shape[0]):
for i2 in range(X.shape[1]):
for i3 in range(X.shape[2]):
for i4 in range(X.shape[3]):
Y[i1, i2, i3, i4] = X[i1, i2,
X.shape[2] - i3 - 1,
X.shape[3] - i4 - 1]
return Y
def my_conv2d_padded_dk_naive(X, dY, sh, sw, ph, pw):
Xtr = np.transpose(my_pad_tensor(X, ph, ph, pw, pw), (1, 0, 2, 3))
f_dY = np.transpose(dY, (1,0,2,3))
f_dY = my_stride0(f_dY, sh - 1, sw - 1)
o_dK = my_conv2d_naive(Xtr, f_dY, 1, 1)
return np.transpose(o_dK, (1,0,2,3))
def my_conv2d_padded_dx_naive(K, dY, sh, sw, ph, pw):
pdY = my_pad_tensor(my_stride0(dY, sh - 1, sw - 1),
K.shape[2] - 1, K.shape[2] - 1,
K.shape[3] - 1, K.shape[3] - 1)
K180 = np.transpose(my_rot180(K), (1, 0, 2, 3))
dX_full = my_conv2d_naive(pdY, K180, 1, 1)
dX = dX_full[:,:,ph:dX_full.shape[2]-ph, pw:dX_full.shape[3]-pw]
return dX
def my_conv2d_bias_add(X, b):
return np.transpose(np.transpose(X, (0, 3, 2, 1)) + b, (0, 3, 2, 1))
def my_maxpool(X, kh, kw, sh, sw):
hy = int((X.shape[2] - kh) / sh) + 1
wy = int((X.shape[3] - kw) / sw) + 1
Y = np.empty((X.shape[0], X.shape[1], hy, wy), dtype=np.float32)
for n in range(X.shape[0]):
for c in range(X.shape[1]):
for i in range(hy):
for j in range(wy):
Y[n, c, i, j] = np.max(X[n, c, i*sh:i*sh+kh,
j*sw:j*sw+kw])
return Y
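# Quick sanity check sketch for my_maxpool, with values chosen so the expected
# output is easy to verify by hand:
def _maxpool_demo():
    X = np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4)
    Y = my_maxpool(X, 2, 2, 2, 2)
    # each non-overlapping 2x2 window keeps its max
    assert np.allclose(Y[0, 0], np.array([[5., 7.], [13., 15.]]))
    return Y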
def my_maxpool_dk(X, Y, dout, kh, kw, sh, sw):
hy = int((X.shape[2] - kh) / sh) + 1
wy = int((X.shape[3] - kw) / sw) + 1
dX = np.zeros(X.shape).astype(np.float32)
for n in range(Y.shape[0]):
for c in range(Y.shape[1]):
for i in range(Y.shape[2]):
for j in range(Y.shape[3]):
m = Y[n, c, i, j]
for xi in range(i*sh,i*sh+kh):
for xj in range(j*sw,j*sw+kw):
if X[n, c, xi, xj] == m:
dX[n, c, xi, xj] += dout[n, c, i, j]
return dX
def to_node(x):
if isinstance(x, (int, float)):
x = np.array(x).astype(np.float32)
if isinstance(x, (np.ndarray, np.generic)):
return Val(x.astype(np.float32))
elif isinstance(x, Node):
return x
else:
raise Exception('to_node(x): x is of bad type')
def to_nodes(xs):
return (to_node(x) for x in xs)
class Node:
def __init__(self, shape, name, preds):
self.shape = shape
self.name = name
self.preds = list(preds)
self.succs = []
self.grads = dict()
self.value = None
self.native = False
for p in preds: p.succs.append(self)
def size(self):
res = 1
for x in self.shape: res *= x
return res
def has_native(self):
return self.native and USE_NATIVE
    '''
    Evaluate the predecessor nodes and fix their arrays so obnum can use them
    (writeable, C-contiguous float32)
    '''
def eval_preds_nat(self):
def prep(x):
x = x.eval()
if not x.flags['WRITEABLE']:
x = np.array(x, dtype=np.float32)
if not x.flags['C_CONTIGUOUS']:
s = tuple(x.shape)
x = x.reshape(-1).reshape(s)
return x
return [prep(x) for x in self.preds]
'''
Compute the value of the node
'''
def eval(self):
if self.value is None:
self.value = self.compute_native() if self.has_native() else self.compute_value()
return self.value
'''
    Discard the cached value of this node and its successors; it must be recomputed
'''
def discard(self):
for s in self.succs:
s.discard()
self.value = None
def compute_value(self):
raise Exception('Node::compute_value() not implemented')
def compute_native(self):
raise Exception('Node::compute_native() not implemented')
'''
    Check whether x is reachable from self through successor edges (i.e. self is an ancestor of x)
'''
def is_ancestor_of(self, x):
if self == x:
return True
for s in self.succs:
if s.is_ancestor_of(x): return True
return False
'''
    Build and return a new node that computes the gradient
    @param pred - the predecessor with respect to which the gradient is built
@param dout - the gradient for this node
'''
def get_grad(self, pred, dout):
        raise Exception('Node::get_grad(pred, dout) not implemented')
def fun_str(self):
res = self.name + '('
for i in range(len(self.preds)):
if i > 0: res += ', '
res += self.preds[i].fun_str()
res += ')'
return res
def __add__(self, other):
return build_vadd(self, other)
def __sub__(self, other):
return build_vsub(self, other)
def __mul__(self, other):
return build_vmul(self, other)
def __truediv__(self, other):
return build_vdiv(self, other)
def __neg__(self):
return build_vneg(self)
    def __radd__(self, other):
        return build_vadd(other, self)
def __rsub__(self, other):
return build_vsub(other, self)
def __rmul__(self, other):
return build_vmul(other, self)
def __rtruediv__(self, other):
return build_vdiv(other, self)
class Val(Node):
def __init__(self, x):
super().__init__(x.shape, 'val', [])
self.value = x
def get_grad(self, pred, dout):
return dout
def fun_str(self):
return str(self.shape)
def update(self, new_val):
self.discard()
self.value = new_val
def compute_value(self):
        raise Exception('Val::compute_value() should never be called: {}'.format(self.shape))
class Vadd(Node):
def __init__(self, x, y):
super().__init__(x.shape, 'vadd', [x, y])
self.native = True
def compute_value(self):
return self.preds[0].eval() + self.preds[1].eval()
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
a, b = self.eval_preds_nat()
obnum.vadd(a, b, out)
return out
def get_grad(self, pred, dout):
return dout
class Vsub(Node):
def __init__(self, x, y):
super().__init__(x.shape, 'sub', [x, y])
self.native = True
def compute_value(self):
return self.preds[0].eval() - self.preds[1].eval()
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
a, b = self.eval_preds_nat()
obnum.vsub(a, b, out)
return out
def get_grad(self, pred, dout):
if pred == self.preds[0]:
return dout
else:
return - dout
class Vmul(Node):
def __init__(self, x, y):
super().__init__(x.shape, 'vmul', [x, y])
self.native = True
def compute_value(self):
return self.preds[0].eval() * self.preds[1].eval()
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
a, b = self.eval_preds_nat()
obnum.vmul(a, b, out)
return out
def get_grad(self, pred, dout):
if self.preds[0] == pred:
return self.preds[1] * dout
else:
return self.preds[0] * dout
class Vdiv(Node):
def __init__(self, x, y):
super().__init__(x.shape, 'vdiv', [x, y])
self.native = True
def compute_value(self):
return self.preds[0].eval() / self.preds[1].eval()
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
a, b = self.eval_preds_nat()
obnum.vdiv(a, b, out)
return out
def get_grad(self, pred, dout):
if self.preds[0] == pred:
return dout / self.preds[1]
else:
return - dout * self.preds[0] / (self.preds[1] * self.preds[1])
class Vneg(Node):
def __init__(self, x):
super().__init__(x.shape, 'neg', [x])
self.native = True
def compute_value(self):
return - self.preds[0].eval()
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
a, = self.eval_preds_nat()
obnum.vneg(a, out)
return out
def get_grad(self, pred, dout):
return - dout
class Vsadd(Node):
def __init__(self, x, y):
super().__init__(y.shape, 'vsadd', [x, y])
self.native = True
def compute_value(self):
return self.preds[0].eval() + self.preds[1].eval()
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
a, b = self.eval_preds_nat()
obnum.vsadd(float(a), b, out)
return out
def get_grad(self, pred, dout):
if self.preds[0] == pred:
return op_sum(dout, axis=0)
else:
return dout
class Vsmul(Node):
def __init__(self, x, y):
super().__init__(y.shape, 'vsmul', [x, y])
self.native = True
def compute_value(self):
return self.preds[0].eval() * self.preds[1].eval()
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
a, b = self.eval_preds_nat()
obnum.vsmul(float(a), b, out)
return out
def get_grad(self, pred, dout):
if self.preds[0] == pred:
return build_dot_vv(dout, self.preds[1])
else:
return build_vsmul(self.preds[0], dout)
class Vsdiv(Node):
def __init__(self, x, y):
super().__init__(y.shape, 'vsdiv', [x, y])
self.native = True
def compute_value(self):
return self.preds[0].eval() / self.preds[1].eval()
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
a, b = self.eval_preds_nat()
obnum.vsdiv(float(a), b, out)
return out
def get_grad(self, pred, dout):
if self.preds[0] == pred:
return build_dot_vv(dout, build_vsdiv(1, self.preds[1]))
else:
return build_vsmul(-self.preds[0], dout) / (pred * pred)
class Vexp(Node):
def __init__(self, x):
super().__init__(x.shape, 'vexp', [x])
self.native = True
def compute_native(self):
out = np.empty(self.shape, dtype=np.float32)
x, = self.eval_preds_nat()
obnum.vexp(x, out)
return out
def compute_value(self):
return np.exp(self.preds[0].eval())
def get_grad(self, pred, dout):
return dout * build_vexp(pred)
class Vlog(Node):
def __init__(self, x):
super().__init__(x.shape, 'vlog', [x])
self.native = True
def compute_native(self):
        out = np.empty(self.shape, dtype=np.float32)
        x, = self.eval_preds_nat()
        obnum.vlog(x, out)  # assumed native op, by analogy with obnum.vexp above
        return out
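# Minimal usage sketch of the graph API (forward evaluation only; the backward
# driver that walks get_grad is not part of this excerpt, and the build_*
# helper functions referenced above are assumed to construct the nodes):
def _revdiff_demo():
    x = Val(np.array([1.0, 2.0, 3.0], dtype=np.float32))
    y = Val(np.array([4.0, 5.0, 6.0], dtype=np.float32))
    z = x * y + x          # builds Vadd(Vmul(x, y), x)
    return z.eval()        # array([ 5., 12., 21.], dtype=float32)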
from __future__ import print_function
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
from scipy.special import erfinv as ierf
from scipy.linalg import sqrtm
try:
from FixedBinInterpolator import FixedBinInterpolator
except:
from .FixedBinInterpolator import FixedBinInterpolator
class FastQuantileLayer ( tf.keras.layers.Layer ) :
"""
Creates a keras layer to emulate the behaviour of
scikit-learn QuantileTransformer.
"""
def __init__ (self,
n_quantiles = 50,
n_samples = 200,
output_distribution='uniform',
default_to_inverse = False,
numpy_dtype = np.float32,
verbose = False,
decorrelate = False,
**kwargs
):
"""
    n_quantiles : int (default: 50)
      Number of quantiles to be computed. It corresponds to
      the number of landmarks used to discretize the cumulative
      density function.
    n_samples : int (default: 200)
      Number of points used to sample the transforms.
      Larger values will result in slower evaluation but more
      accurate function representation and inversion.
output_distribution : string (default: 'uniform')
Marginal distribution for the transformed data.
The choices are 'uniform' (default) or 'normal'.
The normal distribution is truncated.
dtype : numpy data type (default: np.float32)
Data type of the expected input
decorrelate : bool
If true, after the quantile transform, a linear transform is applied
to remove the correlation between variables
    default_to_inverse : bool
      If True, the layer applies the inverse transform by default,
      unless inverse is explicitly specified when applying the layer.
    """
self._Nbins = n_quantiles
self._Nsamples = n_samples
self._outDist = output_distribution
self.default_to_inverse = default_to_inverse
self.numpy_dtype = numpy_dtype
self.verbose = verbose
self.decorrelate = decorrelate
self.fwdTransforms_ = []
self.bwdTransforms_ = []
self.mean_transformed = np.array([])
self.covariance_matrix = np.array([])
self.inverse_covmat = np.array([])
tf.keras.layers.Layer.__init__ ( self, kwargs )
def fit ( self, X, y = None ):
"""
Creates the tensorflow interpolator used to transform the
distribution to either a uniform or normal distribution.
"""
rank = len(X.shape)
if rank == 1: # single variable
self._fit_column ( X, y )
elif rank == 2: # dataset
for iCol in range ( X.shape[1] ):
self._fit_column ( X[:,iCol], y )
else:
raise ValueError ("Expected a numpy array of rank 1 or 2, got %d"%rank)
if rank == 2 and self.decorrelate:
t = self.fwdTransforms_
tX = np.stack([
np.interp ( X[:,i], np.linspace(t[i].x_min, t[i].x_max, len(t[i].y_values)), t[i].y_values)
for i in range(X.shape[1]) ])
mean = np.mean ( tX, axis=1 )
covmat = np.cov ( tX )
invcov = np.linalg.inv ( covmat )
self.mean_transformed = mean.astype(self.numpy_dtype)
self.covariance_matrix = sqrtm(covmat).astype(self.numpy_dtype)
self.inverse_covmat = sqrtm(invcov).astype(self.numpy_dtype)
return self
def build ( self, input_shape ):
tf.keras.layers.Layer.build ( self, input_shape )
def _fit_column ( self, X, y=None ):
"""
Internal. Creates the interpolator for a single variable
"""
y = np.linspace ( 0, 1, self._Nbins )
xq = np.quantile ( X, y )
if self._outDist == 'normal' :
y = ierf ( np.clip(2.*y - 1.,-0.99999, 0.99999)) * np.sqrt(2)
self.fwdTransforms_ . append (
FixedBinInterpolator ( xq[0], xq[-1],
np.interp ( np.linspace(xq[0], xq[-1], self._Nsamples), xq, y ).astype(self.numpy_dtype)
)
)
if self._outDist == 'uniform':
self.bwdTransforms_ . append (
FixedBinInterpolator ( y[0], y[-1], xq.astype(self.numpy_dtype) )
)
else:
self.bwdTransforms_ . append (
FixedBinInterpolator ( y[0], y[-1],
np.interp ( np.linspace(y[0], y[-1], self._Nsamples), y, xq ).astype(self.numpy_dtype)
)
)
def transform ( self, X, inverse = False, force_decorrelate = None ) :
"""
Apply the tensorflow graph
"""
if self.default_to_inverse:
inverse = not inverse
transf = self.bwdTransforms_ if inverse else self.fwdTransforms_
rank = len(X.shape)
decorrelate = force_decorrelate if force_decorrelate is not None else self.decorrelate
if rank != 2: self.decorrelate = decorrelate = False
if not len(transf):
raise RuntimeError ( "QuantileTransformTF was not initialized. Run qtf.fit(numpy_dataset)." )
if self.verbose:
print ("Expected %d columns, got %d." % ( len(transf), X.shape[1]) )
if inverse and decorrelate:
X = tf.matmul ( X, self.covariance_matrix ) + self.mean_transformed
    if rank == 1:
      tX = transf[0].apply ( X )
elif rank == 2:
tX = tf.stack (
[ transf[i].apply ( X[:,i] ) for i in range(X.shape[1]) ],
axis=1
)
if not inverse and decorrelate:
tX = tf.matmul ( tX - self.mean_transformed , self.inverse_covmat )
return tX
def call ( self, X ):
"""
Service function to call transform
"""
return self.transform ( X )
def get_inverse ( self ):
"""
Return a clone of this layer.
"""
new_layer = self.from_config ( self . get_config() )
new_layer . default_to_inverse = not new_layer . default_to_inverse
return new_layer
def get_config ( self ):
"""
Returns the configuration dictionary.
"""
cfg = tf.keras.layers.Layer.get_config ( self )
cfg . update ( dict(
_Nbins = int(self._Nbins) ,
_Nsamples = int(self._Nsamples ) ,
_outDist = str(self._outDist) ,
numpy_dtype = str(np.dtype(self.numpy_dtype).name) ,
default_to_inverse = bool(self.default_to_inverse) ,
decorrelate = bool(self.decorrelate),
mean_transformed = self.mean_transformed.tolist(),
covariance_matrix = self.covariance_matrix.tolist(),
inverse_covmat = self.inverse_covmat.tolist(),
direct_transforms = [
transform.get_config() for transform in self.fwdTransforms_
],
inverse_transforms = [
transform.get_config() for transform in self.bwdTransforms_
],
))
return cfg
@classmethod
def from_config ( cls, cfg ):
"""
Returns the configuration dictionary.
"""
newLayer = FastQuantileLayer()
newLayer._Nbins = cfg [ '_Nbins' ]
newLayer._Nsamples = cfg [ '_Nsamples' ]
newLayer.numpy_dtype = cfg [ 'numpy_dtype']
newLayer.default_to_inverse = cfg [ 'default_to_inverse' ]
newLayer.decorrelate = bool(cfg [ 'decorrelate' ])
newLayer.mean_transformed = np.array(cfg [ 'mean_transformed' ]).astype(newLayer.numpy_dtype)
newLayer.covariance_matrix = np.array(cfg [ 'covariance_matrix' ]).astype(newLayer.numpy_dtype)
newLayer.inverse_covmat = np.array(cfg [ 'inverse_covmat' ]).astype(newLayer.numpy_dtype)
newLayer.fwdTransforms_ = []
newLayer.bwdTransforms_ = []
for transform in cfg [ 'direct_transforms' ]:
newLayer.fwdTransforms_ . append (
FixedBinInterpolator ( transform['x_min'], transform['x_max'],
np.array(transform['y_values'], dtype=transform ['dtype'] ))
)
for transform in cfg [ 'inverse_transforms' ]:
newLayer.bwdTransforms_ . append (
FixedBinInterpolator ( transform['x_min'], transform['x_max'],
np.array(transform['y_values'], dtype=transform ['dtype'] ))
)
return newLayer
def compute_output_shape ( self, input_shape ):
return input_shape
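# Round-trip sketch for the config machinery above (synthetic data; assumes
# eager TensorFlow so the transformed tensors convert to numpy directly):
def _quantile_roundtrip_demo():
    X = np.random.normal(3., 2., (1000, 2)).astype(np.float32)
    layer = FastQuantileLayer(output_distribution='normal')
    layer.fit(X)
    clone = FastQuantileLayer.from_config(layer.get_config())
    tX = clone.transform(X)
    back = clone.transform(np.array(tX), inverse=True)
    return float(np.max(np.abs(np.array(back) - X)))  # should be small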
if __name__ == '__main__':
dataset = np.c_[
np.random.uniform ( 0., 1., 1000) ,
np.random.uniform ( -5., 50., 1000) ,
]
th = np.pi / 5.
    rotmat = np.array([[np.cos(th), np.sin(th)],
                       [-np.sin(th), np.cos(th)]])
# Copyright (c) Facebook, Inc. and its affiliates.
import itertools
import json
import logging
import numpy as np
import os
from collections import OrderedDict
import PIL.Image as Image
import pycocotools.mask as mask_util
import torch
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from .evaluator import DatasetEvaluator
class SemSegEvaluator(DatasetEvaluator):
"""
Evaluate semantic segmentation metrics.
"""
def __init__(self, dataset_name, distributed=True, output_dir=None, *, num_classes=None, ignore_label=None, write_outputs=False):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
distributed (bool): if True, will collect results from all ranks for evaluation.
Otherwise, will evaluate the results in the current process.
output_dir (str): an output directory to dump results.
num_classes, ignore_label: deprecated argument
"""
self._logger = logging.getLogger(__name__)
if num_classes is not None:
self._logger.warn(
"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
)
if ignore_label is not None:
self._logger.warn(
"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
)
self._dataset_name = dataset_name
self._distributed = distributed
self._output_dir = output_dir
self._write_outputs = write_outputs
self._cpu_device = torch.device("cpu")
self.input_file_to_gt_file = {
dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
for dataset_record in DatasetCatalog.get(dataset_name)
}
meta = MetadataCatalog.get(dataset_name)
# Dict that maps contiguous training ids to COCO category ids
try:
c2d = meta.stuff_dataset_id_to_contiguous_id
self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
except AttributeError:
self._contiguous_id_to_dataset_id = None
self._class_names = meta.stuff_classes
self._num_classes = len(meta.stuff_classes)
if num_classes is not None:
assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label
def reset(self):
self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
self._predictions = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a model.
It is a list of dicts. Each dict corresponds to an image and
contains keys like "height", "width", "file_name".
outputs: the outputs of a model. It is either list of semantic segmentation predictions
(Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
segmentation prediction in the same format.
"""
from cityscapesscripts.helpers.labels import trainId2label
pred_output = os.path.join(self._output_dir, 'predictions')
if not os.path.exists(pred_output):
os.makedirs(pred_output)
pred_colour_output = os.path.join(self._output_dir, 'colour_predictions')
if not os.path.exists(pred_colour_output):
os.makedirs(pred_colour_output)
for input, output in zip(inputs, outputs):
output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
pred = np.array(output, dtype=np.uint8)
pred64 = np.array(output, dtype=np.int64) # to use it on bitcount for conf matrix
with PathManager.open(self.input_file_to_gt_file[input["file_name"]], "rb") as f:
gt = np.array(Image.open(f), dtype=np.int64)
gt[gt == self._ignore_label] = self._num_classes
self._conf_matrix += np.bincount(
(self._num_classes + 1) * pred64.reshape(-1) + gt.reshape(-1),
minlength=self._conf_matrix.size,
).reshape(self._conf_matrix.shape)
if self._write_outputs:
file_name = input["file_name"]
basename = os.path.splitext(os.path.basename(file_name))[0]
pred_filename = os.path.join(pred_output, basename + '.png')
Image.fromarray(pred).save(pred_filename)
# colour prediction
output = output.numpy()
pred_colour_filename = os.path.join(pred_colour_output, basename + '.png')
pred_colour = 255 * np.ones([output.shape[0],output.shape[1],3], dtype=np.uint8)
for train_id, label in trainId2label.items():
#if label.ignoreInEval:
# continue
#pred_colour[np.broadcast_to(output == train_id, pred_colour.shape)] = 0 #label.color
pred_colour[(output == train_id),0] = label.color[0]
pred_colour[(output == train_id),1] = label.color[1]
pred_colour[(output == train_id),2] = label.color[2]
Image.fromarray(pred_colour).save(pred_colour_filename)
#self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
def evaluate(self):
"""
Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
* Mean intersection-over-union averaged across classes (mIoU)
* Frequency Weighted IoU (fwIoU)
* Mean pixel accuracy averaged across classes (mACC)
* Pixel Accuracy (pACC)
"""
if self._distributed:
synchronize()
conf_matrix_list = all_gather(self._conf_matrix)
self._predictions = all_gather(self._predictions)
self._predictions = list(itertools.chain(*self._predictions))
if not is_main_process():
return
self._conf_matrix = np.zeros_like(self._conf_matrix)
for conf_matrix in conf_matrix_list:
self._conf_matrix += conf_matrix
'''if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(self._predictions))'''
print(self._conf_matrix)
        acc = np.full(self._num_classes, np.nan, dtype=float)
        iou = np.full(self._num_classes, np.nan, dtype=float)
        tp = self._conf_matrix.diagonal()[:-1].astype(float)
        pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)
        class_weights = pos_gt / np.sum(pos_gt)
        pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)
        acc_valid = pos_gt > 0
        acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
        iou_valid = (pos_gt + pos_pred) > 0
        union = pos_gt + pos_pred - tp
        iou[iou_valid] = tp[iou_valid] / union[iou_valid]
        macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
        miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)
        fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])
        pacc = np.sum(tp) / np.sum(pos_gt)
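        # Worked toy example (illustrative only): for a 2-class confusion
        # matrix [[8, 2], [1, 9]] with rows = predictions, columns = ground
        # truth, tp = [8, 9], pos_gt = [9, 11], pos_pred = [10, 10],
        # union = pos_gt + pos_pred - tp = [11, 12], so
        # iou = [8/11, 9/12] and miou = (8/11 + 9/12) / 2 ~= 0.739.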
from matplotlib import pyplot as plt
import cv2
import numpy as np
import os
import glob
import scipy.io as sio
import json
import openslide
from skimage.measure import label, regionprops
from misc.wsi_handler import get_file_handler
from misc.viz_utils import visualize_instances_dict
# wsi_path = "/home/user/Documents/Master/tcga_test/gdc_download_20211215_084557.672401/9baa25bd-c9d1-4280-bc17-43b90fafd4e0/three.svs"
wsi_path = "/media/user/easystore/HRD-Subset/DigitalSlide_A1M_2S_1_20190127153640117/DigitalSlide_A1M_2S_1_20190127153640117.svs"
wsi_basename = wsi_path.split("/")[-1].split(".svs")[0]
print(wsi_basename)
wsi_json_path = "/media/user/easystore/HRD-Subset/DigitalSlide_A1M_2S_1_20190127153640117/results/d7ca4f688ae04bad9627b5b14956881d"
wsi_one = openslide.open_slide(wsi_path)
print(wsi_one.dimensions)
wsi_png = wsi_basename + ".png"
mask_path_wsi = os.path.join(wsi_json_path, 'mask', wsi_png)
thumb_path_wsi = os.path.join(wsi_json_path, 'thumb', wsi_basename + '.png')
thumb = cv2.cvtColor(cv2.imread(thumb_path_wsi), cv2.COLOR_BGR2RGB)
mask = cv2.cvtColor(cv2.imread(mask_path_wsi), cv2.COLOR_BGR2RGB)
label_mask = label(mask)
props = regionprops(label_mask)
areas = []
for prop in props:
areas.append(prop.area)
# get largest object
max_prop = props[np.argmax(areas)]
bbox = max_prop.bbox
print(bbox)
top_left = [bbox[0], bbox[1]]
bot_right = [bbox[3], bbox[4]]
y_mask_ratio = top_left[0] / mask.shape[0]
y_original = int(wsi_one.dimensions[1]*y_mask_ratio)
y_original += 15000
x_mask_ratio = top_left[1] / mask.shape[1]
x_original = int(wsi_one.dimensions[0]*x_mask_ratio)
x_original += 16000
# plot the low resolution thumbnail along with the tissue mask
# plt.figure(figsize=(15,8))
# plt.subplot(1,2,1)
# plt.imshow(thumb)
# plt.axis('off')
# plt.title('Thumbnail', fontsize=25)
# plt.subplot(1,2,2)
# plt.imshow(mask)
# plt.axis('off')
# plt.title('Mask', fontsize=25)
# plt.show()
json_path_wsi = os.path.join(wsi_json_path, 'json', wsi_basename + '.json')
bbox_list_wsi = []
centroid_list_wsi = []
contour_list_wsi = []
type_list_wsi = []
patch_size = 1000
# add results to individual lists
with open(json_path_wsi) as json_file:
data = json.load(json_file)
mag_info = data['mag']
nuc_info = data['nuc']
for inst in nuc_info:
inst_info = nuc_info[inst]
inst_centroid = inst_info['centroid']
        if (x_original < inst_centroid[0] < x_original + patch_size
                and y_original < inst_centroid[1] < y_original + patch_size):
centroid_list_wsi.append(inst_centroid)
inst_contour = inst_info['contour']
contour_list_wsi.append(inst_contour)
inst_bbox = inst_info['bbox']
bbox_list_wsi.append(inst_bbox)
inst_type = inst_info['type']
type_list_wsi.append(inst_type)
# keys = nuc_info.keys()
print("Kept Nuclei: ", len(centroid_list_wsi))
print(mag_info)
# define the region to select
x_tile = x_original
y_tile = y_original
w_tile = patch_size
h_tile = patch_size
coords = (x_tile, y_tile)
patch_level = -1
# load the wsi object and read region
wsi_ext =".svs"
# wsi_obj = get_file_handler(wsi_path, wsi_ext)
# wsi = openslide.open_slide(wsi_path)
# print(wsi.dimensions)
# wsi_tile = wsi.read_region(coords, patch_level, tuple([w_tile, h_tile]))
wsi_obj = get_file_handler(wsi_path, wsi_ext)
wsi_obj.prepare_reading(read_mag=mag_info)
wsi_tile = wsi_obj.read_region((x_tile,y_tile), (w_tile,h_tile))
coords_xmin = x_tile
coords_xmax = x_tile + w_tile
coords_ymin = y_tile
coords_ymax = y_tile + h_tile
tile_info_dict = {}
count = 0
for idx, cnt in enumerate(contour_list_wsi):
cnt_tmp = np.array(cnt)
    cnt_tmp = cnt_tmp[(cnt_tmp[:, 0] >= coords_xmin) & (cnt_tmp[:, 0] <= coords_xmax)
                      & (cnt_tmp[:, 1] >= coords_ymin) & (cnt_tmp[:, 1] <= coords_ymax)]
label = str(type_list_wsi[idx])
if cnt_tmp.shape[0] > 0:
cnt_adj = np.round(cnt_tmp - np.array([x_tile,y_tile])).astype('int')
tile_info_dict[idx] = {'contour': cnt_adj, 'type':label}
count += 1
type_info = {
"0" : ["nolabe", [0 , 0, 0]],
"1" : ["neopla", [255, 0, 0]],
"2" : ["inflam", [0 , 255, 0]],
"3" : ["connec", [0 , 0, 255]],
"4" : ["necros", [255, 255, 0]],
"5" : ["no-neo", [255, 165, 0]]
}
fig = plt.figure(figsize=(100,80))
overlaid_output = visualize_instances_dict(wsi_tile, tile_info_dict, type_colour=type_info, line_thickness=2)
plt.imshow(overlaid_output)
plt.axis('off')
plt.title('Segmentation Overlay')
for i in type_info:
label = type_info[i][0]
    color = np.array(type_info[i][1])
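    # Hypothetical completion (the source is truncated here): plot one empty
    # series per nucleus type so matplotlib can assemble a colour legend.
    plt.plot([], [], color=tuple(color / 255.0), label=label)
plt.legend()
plt.show()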
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 13 00:25:40 2019
@author: MAHDI
"""
import sys
import numpy as np
def PrintSqMatrix(a, n):
for i in range(n):
for j in range(n-1):
print("%.4f" % (a[i][j]), end=' ')
print("%.4f" % (a[i][n-1]))
return
def PrintColumnMatrix(mat, n):
for i in range(n):
print("%.4f" % (mat[i]))
return
def DecomposeLU(a, n):
n=int(n)
l = np.eye(n)
u = np.array(a)
# print('l += ' ,l,a )
# print(a[0][0])
for k in range(n):
for i in range(k+1, n):
# =============================================================================
# if np.abs(u[k][k])<=1e-10:
# return 0
# =============================================================================
factor = u[i][k]/u[k][k]
l[i][k] = factor
for j in range(n):
u[i][j] = u[i][j]-factor*u[k][j]
# =============================================================================
# print(l)
# print(u)
# print(a)
#
# =============================================================================
return (l, u)
def Forwardsubstitution(l, b, n):
y = np.zeros(n)
# =============================================================================
# print(y)
# print(b)
# =============================================================================
for i in range(n):
sum = 0.0
for j in range(0, i):
sum += l[i][j]*y[j]
y[i] = (b[i]-sum)/l[i][i]
return y
def Backwardsubstitution(u, y, n):
x = np.zeros(n)
# =============================================================================
# print(y)
# print(b)
# =============================================================================
for i in range(n-1, -1, -1):
sum = 0.0
for j in range(i+1, n):
sum += u[i][j]*x[j]
x[i] = (y[i]-sum)/u[i][i]
return x
def Nosolution():
print("No unique solution")
return
def CheckSolution(u, n):
for i in range(n):
non_zero=False
for j in range(n):
if abs(u[i][j]) >= 1e-12:
non_zero=True
break
if non_zero == False :
return False
for i in range(n):
if abs(u[i][i])<=1e-12:
return False
return True
def Main():
inp = open('in1.txt')
sys.stdout = open('out1.txt', 'w')
N = int(inp.readline())
# print(n)
A = [list(map(float, inp.readline().strip().split(' '))) for i in range(N)]
A = np.array(A)
B = [float(inp.readline()) for i in range(N)]
    B = np.array(B)
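    # Plausible completion (the source is truncated here); it simply wires
    # together the helpers defined above:
    l, u = DecomposeLU(A, N)
    if CheckSolution(u, N):
        y = Forwardsubstitution(l, B, N)
        x = Backwardsubstitution(u, y, N)
        PrintColumnMatrix(x, N)
    else:
        Nosolution()
    return


Main()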
"""
Classes for lighting in renderer
Author: <NAME>
"""
import numpy as np
from autolab_core import RigidTransform
class Color(object):
WHITE = np.array([255, 255, 255])
BLACK = np.array([0, 0, 0])
RED = np.array([255, 0, 0])
GREEN = np.array([0, 255, 0])
BLUE = np.array([0, 0, 255])
class MaterialProperties(object):
""" Struct to encapsulate material properties for
OpenGL rendering.
Attributes
----------
color : :obj:`numpy.ndarray`
3-array of integers between 0 and 255
"""
def __init__(self, color=Color.WHITE,
ambient=0.2,
diffuse=0.8,
specular=0,
shininess=0):
# set params
self.color = np.array(color).astype(np.uint8)
self.ambient = ambient
self.diffuse = diffuse
self.specular = specular
self.shininess = shininess
def __str__(self):
s = ''
s += 'Color: %s\n' %(str(self.color))
s += 'Ambient: %f\n' %(self.ambient)
s += 'Diffuse: %f\n' %(self.diffuse)
s += 'Specular: %f\n' %(self.specular)
s += 'Shininess: %f\n' %(self.shininess)
return s
@property
def arr(self):
""" Returns the material properties as a contiguous numpy array. """
return np.r_[self.color,
self.ambient * np.ones(3), 1,
                     self.diffuse * np.ones(3), 1,
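                     # completion assumed from the attribute list above (the
                     # source is truncated here):
                     self.specular * np.ones(3), 1,
                     self.shininess]

# Illustrative use: MaterialProperties(color=Color.RED).arr yields one flat
# array of material parameters for the OpenGL renderer.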
"""Spectral index distributions."""
import numpy as np
import frbpoppy.gen_dists as gd
def constant(value=1e40, shape=1):
"""Good for standard candles."""
    return np.full(shape, value)
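# e.g. constant(value=1e40, shape=100) returns 100 identical values -- the
# delta-function distribution appropriate for standard candles.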
from collections import OrderedDict
import mmcv
import numpy as np
import torch
def calculate(gt, pred):
if gt.shape[0] == 0:
return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
if np.isnan(silog):
silog = 0
log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
return a1, a2, a3, abs_rel, rmse, log_10, rmse_log, silog, sq_rel
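# Illustrative check with synthetic values (not from any dataset):
#   gt = np.array([1.0, 2.0, 4.0]); pred = np.array([1.1, 1.9, 4.2])
#   calculate(gt, pred)[0]  # a1: fraction of pixels with max(gt/pred, pred/gt) < 1.25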
def metrics(gt, pred, interval1=20, interval2=60, max_depth=80):
mask = gt > 0
gt = gt[mask]
pred = pred[mask]
a1, a2, a3, abs_rel, rmse, log_10, rmse_log, silog, sq_rel = calculate(gt, pred)
# TODO: hack here to eval different distance:
mask_1 = gt <= interval1
mask_2 = gt > 0
mask = np.logical_and(mask_1, mask_2)
gt_l1 = gt[mask]
pred_l1 = pred[mask]
a1_l1, a2_l1, a3_l1, abs_rel_l1, rmse_l1, log_10_l1, rmse_log_l1, silog_l1, sq_rel_l1 = calculate(gt_l1, pred_l1)
mask_1 = gt <= interval2
mask_2 = gt > interval1
mask = np.logical_and(mask_1, mask_2)
gt_l2 = gt[mask]
pred_l2 = pred[mask]
a1_l2, a2_l2, a3_l2, abs_rel_l2, rmse_l2, log_10_l2, rmse_log_l2, silog_l2, sq_rel_l2 = calculate(gt_l2, pred_l2)
mask_1 = gt <= max_depth
mask_2 = gt > interval2
mask = np.logical_and(mask_1, mask_2)
gt_l3 = gt[mask]
pred_l3 = pred[mask]
a1_l3, a2_l3, a3_l3, abs_rel_l3, rmse_l3, log_10_l3, rmse_log_l3, silog_l3, sq_rel_l3 = calculate(gt_l3, pred_l3)
return a1, a2, a3, abs_rel, rmse, log_10, rmse_log, silog, sq_rel, \
a1_l1, a2_l1, a3_l1, abs_rel_l1, rmse_l1, log_10_l1, rmse_log_l1, silog_l1, sq_rel_l1, \
a1_l2, a2_l2, a3_l2, abs_rel_l2, rmse_l2, log_10_l2, rmse_log_l2, silog_l2, sq_rel_l2, \
a1_l3, a2_l3, a3_l3, abs_rel_l3, rmse_l3, log_10_l3, rmse_log_l3, silog_l3, sq_rel_l3
# hack for enhance interval evaluation
# def metrics(gt, pred, interval1=20, interval2=60, max_depth=80):
# mask = gt > 0
# gt = gt[mask]
# pred = pred[mask]
# a1, a2, a3, abs_rel, rmse, log_10, rmse_log, silog, sq_rel = calculate(gt, pred)
# temp = []
# # TODO: hack here to eval different distance:
# for index, begin in enumerate(range(80)):
# end = begin + 1
# mask_1 = gt <= end
# mask_2 = gt > begin
# mask = np.logical_and(mask_1, mask_2)
# gt_l1 = gt[mask]
# pred_l1 = pred[mask]
# a1, a2, a3, abs_rel, rmse, log_10, rmse_log, silog, sq_rel = calculate(gt_l1, pred_l1)
# temp.extend([a1, a2, a3, abs_rel, rmse, log_10, rmse_log, silog, sq_rel])
# return temp
def eval_metrics(gt, pred):
mask = gt > 0
gt = gt[mask]
pred = pred[mask]
thresh = np.maximum((gt / pred), (pred / gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
abs_rel = np.mean(np.abs(gt - pred) / gt)
sq_rel = np.mean(((gt - pred) ** 2) / gt)
rmse = (gt - pred) ** 2
rmse = np.sqrt(rmse.mean())
rmse_log = (np.log(gt) - np.log(pred)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
err = np.log(pred) - np.log(gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log,
silog=silog, sq_rel=sq_rel)
def pre_eval_to_metrics(pre_eval_results):
# convert list of tuples to tuple of lists, e.g.
# [(A_1, B_1, C_1, D_1), ..., (A_n, B_n, C_n, D_n)] to
# ([A_1, ..., A_n], ..., [D_1, ..., D_n])
pre_eval_results = tuple(zip(*pre_eval_results))
ret_metrics = OrderedDict({})
level_num = len(pre_eval_results) // 9
for i in range(level_num):
ret_metrics['a1_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+0])
ret_metrics['a2_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+1])
ret_metrics['a3_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+2])
ret_metrics['abs_rel_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+3])
ret_metrics['rmse_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+4])
ret_metrics['log_10_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+5])
        ret_metrics['rmse_log_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+6])
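        # completion assumed from the nine-metrics-per-level pattern above
        # (the source is truncated here):
        ret_metrics['silog_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+7])
        ret_metrics['sq_rel_{}'.format("all" if i==0 else "l_{}".format(i))] = np.nanmean(pre_eval_results[i*9+8])
    return ret_metrics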
"""
The `nntm.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
# Author: <NAME> <<EMAIL>>
# License: MIT
import logging
import numbers
from collections.abc import Iterable
import numpy as np
import pandas as pd
from sklearn.model_selection import BaseCrossValidator
from sklearn.utils import indexable
from ..utils.validation import _num_samples
logger = logging.getLogger(__name__)
__all__ = ["PurgedKFold", "check_cv"]
class PurgedKFold(BaseCrossValidator):
"""Purged K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds. Training observations overlapping
in time with test observations are purged.
Optionally, the eras that immediately follow the test set can be
eliminated using the `embargo` argument.
Data is assumed to be contiguous (shuffle=False).
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
target_days : int, default=20
Days between the observation of samples and the target.
embargo : float between 0.0 and 1.0, default=None
Relative number of eras to be purged after every test set.
(`embargo` * `total_era_count`) eras are embargoed.
References
----------
.. [1] `<NAME> (2018). Advances in Financial Machine
Learning. Chapter 7 (Cross-Validation in Finance).`_
.. [2] `Super Massive Data Release: Deep Dive
<https://forum.numer.ai/t/super-massive-data-release-deep-dive/4053>`_
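    Examples
    --------
    A minimal sketch (``groups`` labels each sample's era; it can be
    omitted when ``X`` is a DataFrame with an ``era`` column)::

        cv = PurgedKFold(n_splits=3, target_days=20, embargo=0.1)
        for train_idx, test_idx in cv.split(X, y, groups=eras):
            ...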
"""
def __init__(self, n_splits=5, target_days=20, embargo=None):
if not isinstance(n_splits, numbers.Integral):
raise ValueError(
"The number of folds must be of Integral type. "
f"`n_splits={n_splits}` of type {type(n_splits)} was passed."
)
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one "
"train/test split by setting `n_splits=2` or more, "
f"got `n_splits={n_splits}`."
)
if not isinstance(target_days, numbers.Integral):
raise ValueError(
"The number of target days must be of Integral type. "
f"`target_days={target_days}` of type {type(target_days)} was passed."
)
target_days = int(target_days)
if target_days % 5 != 0:
raise ValueError(
"The number of target days has to be a multiple of 5. "
f"`target_days={target_days}` was passed."
)
if embargo:
if not isinstance(embargo, float):
raise ValueError(
"Embargo must be of float type. "
f"`embargo={embargo}` of type {type(embargo)} was passed."
)
if not 0.0 < embargo < 1.0:
raise ValueError(
"Embargo must be between 0.0 and 1.0. "
f"`embargo={embargo}` was passed."
)
self.n_splits = n_splits
self.target_days = target_days
self.embargo = embargo
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where `n_samples` is the number of samples
and `n_features` is the number of features.
y : array-like of shape (n_samples,), default=None
The target variable for supervised learning problems.
groups : array-like of shape (n_samples,), default=None
Eras for the samples used while splitting the dataset into
train/test set. This parameter is not required when X is
a pandas DataFrame containing an `era` column.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if isinstance(X, np.ndarray) and groups is None:
raise ValueError("`groups` parameter is required when X is a numpy array")
if isinstance(X, pd.DataFrame) and groups is None and "era" not in X.columns:
raise ValueError("`groups` parameter is required when X has no era column")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
(
f"Cannot have number of splits n_splits={self.n_splits} greater "
f"than the number of samples: n_samples={n_samples}."
)
)
eras = np.fromiter(self._get_eras(X, groups=groups), dtype=int)
target_weeks = self.target_days // 5
        eras_target_release = np.array([era + target_weeks - 1 for era in eras])
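        # era in which each sample's target resolves: a `target_days`-day
        # target spans (target_days // 5) weekly eras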
####Please do not remove lines below####
from lmfit import Parameters
import numpy as np
import sys
import os
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('./Functions'))
sys.path.append(os.path.abspath('./Fortran_routines'))
from functools import lru_cache
####Please do not remove lines above####
####Import your modules below if needed####
from ff_ellipsoid import ff_ellipsoid_ml_asaxs
from Chemical_Formula import Chemical_Formula
from PeakFunctions import LogNormal, Gaussian
from Structure_Factors import hard_sphere_sf, sticky_sphere_sf
from utils import find_minmax, calc_rho, create_steps
import time
class Biphasic_Ellipsoid_Uniform: #Please put the class name same as the function name
def __init__(self, x=0, Np=10, flux=1e13, term='Total', dist='Gaussian', Energy=None, relement='Au', Nalf=200,
NrDep='False', norm=1.0, Rsig=0.0, sbkg=0.0, cbkg=0.0, abkg=0.0, D=1.0, phi=0.1, U=-1.0,
SF='None', mpar={'Phase_1':{'Material': ['Au', 'H2O'],
'Density': [19.32, 1.0],
'VolFrac': [1.0, 1.0],
'Rmoles': [1.0, 0.0],
'R': [1.0, 0.0],
'RzRatio':[1.0,1.0]},
'Phase_2': {'Material': ['Au', 'H2O'],
'Density': [19.32, 1.0],
'VolFrac': [1.0, 1.0],
'Rmoles': [1.0, 0.0],
'R': [1.0, 0.0],
'RzRatio': [1.0, 1.0]},
'Solvent': {'Material': ['H2O', 'H2O'],
'Density': [0.0, 1.0],
'VolFrac': [1.0, 1.0],
'Rmoles': [1.0, 0.0],
'R': [1.0, 0.0],
'RzRatio': [1.0, 1.0]
}}):
"""
Documentation
Calculates the Energy dependent form factor of multilayered oblate nanoparticles with different materials and different phases
x : Reciprocal wave-vector 'Q' inv-Angs in the form of a scalar or an array
relement : Resonant element of the nanoparticle. Default: 'Au'
Energy : Energy of X-rays in keV at which the form-factor is calculated. Default: None
Np : No. of points with which the size distribution will be computed. Default: 10
NrDep : Energy dependence of the non-resonant element. Default= 'False' (Energy independent), 'True' (Energy dependent)
        dist : The probability distribution function for the radii of different interfaces in the nanoparticles. Default: Gaussian
        Nalf : Number of azimuthal angle points for angular averaging
norm : The density of the nanoparticles in Molar (Moles/Liter)
sbkg : Constant incoherent background for SAXS-term
cbkg : Constant incoherent background for cross-term
abkg : Constant incoherent background for Resonant-term
flux : Total X-ray flux to calculate the errorbar to simulate the errorbar for the fitted data
term : 'SAXS-term' or 'Cross-term' or 'Resonant-term' or 'Total'
D : Hard Sphere Diameter
phi : Volume fraction of particles
U : The sticky-sphere interaction energy
SF : Type of structure factor. Default: 'None'
Rsig : Width of distribution of radii
mpar : Multi-parameter which defines the following including the solvent/bulk medium which is the last one. Default: 'H2O'
Material ('Materials' using chemical formula),
Density ('Density' in gm/cubic-cms),
Density of solvent ('SolDensity' in gm/cubic-cms) of the particular layer
Mole-fraction ('Rmoles') of resonant element in the material)
Radii ('R' in Angs), and
Height to Radii ratio ('RzRatio' ratio)
"""
if type(x) == list:
self.x = np.array(x)
else:
self.x = x
self.Nalf = Nalf
self.norm = norm
self.sbkg = sbkg
self.cbkg = cbkg
self.abkg = abkg
self.dist = dist
self.Np = Np
self.Energy = Energy
self.relement = relement
self.NrDep = NrDep
# self.rhosol=rhosol
self.flux = flux
self.D = D
self.phi = phi
self.U = U
self.Rsig = Rsig
self.__mpar__ = mpar # If there is any multivalued parameter
self.SF = SF
self.term = term
self.__Density__ = {}
self.__VolFrac__ = {}
self.__R__ = {}
self.__Rmoles__ = {}
self.__material__ = {}
self.__RzRatio__= {}
self.choices = {'dist': ['Gaussian', 'LogNormal'], 'NrDep': ['True', 'False'],
'SF': ['None', 'Hard-Sphere', 'Sticky-Sphere'],
'term': ['SAXS-term', 'Cross-term', 'Resonant-term',
'Total']} # If there are choices available for any fixed parameters
self.__cf__ = Chemical_Formula()
self.__fit__ = False
self.output_params = {}
self.output_params = {'scaler_parameters': {}}
self.__mkeys__=list(self.__mpar__.keys())
self.init_params()
def init_params(self):
"""
Define all the fitting parameters like
self.params.add('sig',value = 0, vary = 0, min = -np.inf, max = np.inf, expr = None, brute_step = None)
"""
self.params = Parameters()
self.params.add('norm', value=self.norm, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('D', value=self.D, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('phi', value=self.phi, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('sbkg', value=self.sbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('cbkg', value=self.cbkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('abkg', value=self.abkg, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('U', value=self.U, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
self.params.add('Rsig', value=self.Rsig, vary=0, min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
mkey1 = self.__mkeys__[0]
for key in self.__mpar__[mkey1].keys():
if key != 'Material':
for i in range(len(self.__mpar__[mkey1][key])):
self.params.add('__%s_%s_%03d' % (mkey1, key, i), value=self.__mpar__[mkey1][key][i], vary=0,
min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
for mkey in self.__mkeys__[1:]:
for key in self.__mpar__[mkey].keys():
if key != 'Material' and key != 'R':
for i in range(len(self.__mpar__[mkey][key])):
self.params.add('__%s_%s_%03d' % (mkey, key, i), value=self.__mpar__[mkey][key][i], vary=0,
min=-np.inf, max=np.inf, expr=None, brute_step=0.1)
elif key == 'R' or key=='RzRatio':
for i in range(len(self.__mpar__[mkey][key])):
self.params.add('__%s_%s_%03d' % (mkey, key, i), value=self.__mpar__[mkey][key][i], vary=0,
min=-np.inf, max=np.inf
, expr='__%s_%s_%03d' % (mkey1, key, i), brute_step=0.1)
@lru_cache(maxsize=10)
def calc_Rdist(self, R, Rsig, dist, N):
R = np.array(R)
totalR = np.sum(R[:-1])
if Rsig > 0.001:
fdist = eval(dist + '.' + dist + '(x=0.001, pos=totalR, wid=Rsig)')
if dist == 'Gaussian':
rmin, rmax = max(0.001, totalR - 5 * Rsig), totalR + 5 * Rsig
else:
                rmin, rmax = max(0.001, np.exp(np.log(totalR) - 5 * Rsig)), np.exp(np.log(totalR) + 5 * Rsig)
#The DF of a tidal stream
import copy
import multiprocessing
import warnings
from pkg_resources import parse_version
import numpy
import scipy
from scipy import special, interpolate, integrate, optimize
_SCIPY_VERSION= parse_version(scipy.__version__)
if _SCIPY_VERSION < parse_version('0.10'): #pragma: no cover
from scipy.maxentropy import logsumexp
elif _SCIPY_VERSION < parse_version('0.19'): #pragma: no cover
from scipy.misc import logsumexp
else:
from scipy.special import logsumexp
from ..orbit import Orbit
from .df import df
from ..util import coords, fast_cholesky_invert, \
conversion, multi, plot, stable_cho_factor, ars
from ..util.conversion import physical_conversion, _APY_UNITS, _APY_LOADED
from ..actionAngle.actionAngleIsochroneApprox import dePeriod
from ..potential import flatten as flatten_potential
from ..util import galpyWarning
if _APY_LOADED:
from astropy import units
_INTERPDURINGSETUP= True
_USEINTERP= True
_USESIMPLE= True
# cast a wide net
_TWOPIWRAPS= numpy.arange(-4,5)*2.*numpy.pi
_labelDict= {'x': r'$X$',
'y': r'$Y$',
'z': r'$Z$',
'r': r'$R$',
'phi': r'$\phi$',
'vx':r'$V_X$',
'vy':r'$V_Y$',
'vz':r'$V_Z$',
'vr':r'$V_R$',
'vt':r'$V_T$',
'll':r'$\mathrm{Galactic\ longitude\, (deg)}$',
'bb':r'$\mathrm{Galactic\ latitude\, (deg)}$',
'dist':r'$\mathrm{distance\, (kpc)}$',
'pmll':r'$\mu_l\,(\mathrm{mas\,yr}^{-1})$',
'pmbb':r'$\mu_b\,(\mathrm{mas\,yr}^{-1})$',
'vlos':r'$V_{\mathrm{los}}\,(\mathrm{km\,s}^{-1})$'}
class streamdf(df):
"""The DF of a tidal stream"""
def __init__(self,sigv,progenitor=None,pot=None,aA=None,useTM=False,
tdisrupt=None,sigMeanOffset=6.,leading=True,
sigangle=None,
deltaAngleTrack=None,nTrackChunks=None,nTrackIterations=None,
progIsTrack=False,
ro=None,vo=None,
Vnorm=None,Rnorm=None,
R0=8.,Zsun=0.0208,vsun=[-11.1,8.*30.24,7.25],
multi=None,interpTrack=_INTERPDURINGSETUP,
useInterp=_USEINTERP,nosetup=False,nospreadsetup=False,
approxConstTrackFreq=False,useTMHessian=False,
custom_transform=None):
"""
NAME:
__init__
PURPOSE:
Initialize a quasi-isothermal DF
INPUT:
sigv - radial velocity dispersion of the progenitor (can be Quantity)
tdisrupt= (5 Gyr) time since start of disruption (can be Quantity)
leading= (True) if True, model the leading part of the stream
if False, model the trailing part
progenitor= progenitor orbit as Orbit instance (will be re-integrated, so don't bother integrating the orbit before)
progIsTrack= (False) if True, then the progenitor (x,v) is actually the (x,v) of the stream track at zero angle separation; useful when initializing with an orbit fit; the progenitor's position will be calculated
pot= Potential instance or list thereof
aA= actionAngle instance used to convert (x,v) to actions
useTM= (False) if set to an actionAngleTorus instance, use this to speed up calculations
sigMeanOffset= (6.) offset between the mean of the frequencies
and the progenitor, in units of the largest
eigenvalue of the frequency covariance matrix
(along the largest eigenvector), should be positive;
to model the trailing part, set leading=False
sigangle= (sigv/122/[1km/s]=1.8sigv in natural coordinates)
estimate of the angle spread of the debris initially (can be Quantity)
deltaAngleTrack= (None) angle to estimate the stream track over (rad; or can be Quantity)
nTrackChunks= (floor(deltaAngleTrack/0.15)+1) number of chunks to divide the progenitor track in
nTrackIterations= Number of iterations to perform when establishing the track; each iteration starts from a previous approximation to the track in (x,v) and calculates a new track based on the deviation between the previous track and the desired track in action-angle coordinates; if not set, an appropriate value is determined based on the magnitude of the misalignment between stream and orbit, with larger numbers of iterations for larger misalignments
interpTrack= (might change), interpolate the stream track while
setting up the instance (can be done by hand by
calling self._interpolate_stream_track() and
self._interpolate_stream_track_aA())
useInterp= (might change), use interpolation by default when
calculating approximated frequencies and angles
nosetup= (False) if True, don't setup the stream track and anything
else that is expensive
nospreadsetup= (False) if True, don't setup the spread around the stream track (only for nosetup is False)
multi= (None) if set, use multi-processing
Coordinate transformation inputs:
vo= (220) circular velocity to normalize velocities with [used to be Vnorm; can be Quantity]
ro= (8) Galactocentric radius to normalize positions with [used to be Rnorm; can be Quantity]
R0= (8) Galactocentric radius of the Sun (kpc) [can be different from ro; can be Quantity]
Zsun= (0.0208) Sun's height above the plane (kpc; can be Quantity)
vsun= ([-11.1,241.92,7.25]) Sun's motion in cylindrical coordinates (vR positive away from center) (can be Quantity)
custom_transform= (None) matrix implementing the rotation from (ra,dec) to a custom set of sky coordinates
approxConstTrackFreq= (False) if True, approximate the stream assuming that the frequency is constant along the stream (only works with useTM, for which this leads to a significant speed-up)
useTMHessian= (False) if True, compute the basic Hessian dO/dJ_prog using TM; otherwise use aA
OUTPUT:
object
HISTORY:
2013-09-16 - Started - Bovy (IAS)
2013-11-25 - Started over - Bovy (IAS)
"""
if ro is None and not Rnorm is None:
warnings.warn("WARNING: Rnorm keyword input to streamdf is deprecated in favor of the standard ro keyword", galpyWarning)
ro= Rnorm
if vo is None and not Vnorm is None:
warnings.warn("WARNING: Vnorm keyword input to streamdf is deprecated in favor of the standard vo keyword", galpyWarning)
vo= Vnorm
df.__init__(self,ro=ro,vo=vo)
sigv= conversion.parse_velocity(sigv,vo=self._vo)
self._sigv= sigv
if tdisrupt is None:
self._tdisrupt= 5./conversion.time_in_Gyr(self._vo,self._ro)
else:
self._tdisrupt= conversion.parse_time(tdisrupt,ro=self._ro,vo=self._vo)
self._sigMeanOffset= sigMeanOffset
if pot is None: #pragma: no cover
raise IOError("pot= must be set")
self._pot= flatten_potential(pot)
self._aA= aA
if not self._aA._pot == self._pot:
raise IOError("Potential in aA does not appear to be the same as given potential pot")
self._check_consistent_units()
if useTM:
self._useTM= True
self._aAT= useTM # confusing, no?
self._approxConstTrackFreq= approxConstTrackFreq
if not self._aAT._pot == self._pot:
raise IOError("Potential in useTM=actionAngleTorus instance does not appear to be the same as given potential pot")
else:
self._useTM= False
if (multi is True): #if set to boolean, enable cpu_count processes
self._multi= multiprocessing.cpu_count()
else:
self._multi= multi
self._progenitor_setup(progenitor,leading,useTMHessian)
sigangle= conversion.parse_angle(sigangle)
deltaAngleTrack= conversion.parse_angle(deltaAngleTrack)
self._offset_setup(sigangle,leading,deltaAngleTrack)
# if progIsTrack, calculate the progenitor that gives a track that is approximately the given orbit
if progIsTrack:
self._setup_progIsTrack()
R0= conversion.parse_length_kpc(R0)
Zsun= conversion.parse_length_kpc(Zsun)
vsun= conversion.parse_velocity_kms(vsun)
vsun[0]= conversion.parse_velocity_kms(vsun[0])
vsun[1]= conversion.parse_velocity_kms(vsun[1])
vsun[2]= conversion.parse_velocity_kms(vsun[2])
self._setup_coord_transform(R0,Zsun,vsun,progenitor,custom_transform)
#Determine the stream track
if not nosetup:
self._determine_nTrackIterations(nTrackIterations)
self._determine_stream_track(nTrackChunks)
self._useInterp= useInterp
if interpTrack or self._useInterp:
self._interpolate_stream_track()
self._interpolate_stream_track_aA()
self.calc_stream_lb()
if not nospreadsetup: self._determine_stream_spread()
return None
def _progenitor_setup(self,progenitor,leading,useTMHessian):
"""The part of the setup relating to the progenitor's orbit"""
#Progenitor orbit: Calculate actions, frequencies, and angles for the progenitor
self._progenitor= progenitor() #call to get new Orbit
# Make sure we do not use physical coordinates
self._progenitor.turn_physical_off()
acfs= self._aA.actionsFreqsAngles(self._progenitor,
_firstFlip=(not leading),
use_physical=False)
self._progenitor_jr= acfs[0][0]
self._progenitor_lz= acfs[1][0]
self._progenitor_jz= acfs[2][0]
self._progenitor_Omegar= acfs[3]
self._progenitor_Omegaphi= acfs[4]
self._progenitor_Omegaz= acfs[5]
self._progenitor_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3)
self._progenitor_angler= acfs[6]
self._progenitor_anglephi= acfs[7]
self._progenitor_anglez= acfs[8]
self._progenitor_angle= numpy.array([acfs[6],acfs[7],acfs[8]]).reshape(3)
#Calculate dO/dJ Jacobian at the progenitor
if useTMHessian:
h, fr,fp,fz,e= self._aAT.hessianFreqs(self._progenitor_jr,
self._progenitor_lz,
self._progenitor_jz)
self._dOdJp= h
# Replace frequencies with TM frequencies
self._progenitor_Omegar= fr
self._progenitor_Omegaphi= fp
self._progenitor_Omegaz= fz
self._progenitor_Omega= numpy.array([self._progenitor_Omegar,
self._progenitor_Omegaphi,
self._progenitor_Omegaz]).reshape(3)
else:
self._dOdJp= calcaAJac(self._progenitor.vxvv[0],
self._aA,dxv=None,dOdJ=True,
_initacfs=acfs)
self._dOdJpInv= numpy.linalg.inv(self._dOdJp)
self._dOdJpEig= numpy.linalg.eig(self._dOdJp)
return None
def _offset_setup(self,sigangle,leading,deltaAngleTrack):
"""The part of the setup related to calculating the stream/progenitor offset"""
#From the progenitor orbit, determine the sigmas in J and angle
self._sigjr= (self._progenitor.rap()-self._progenitor.rperi())/numpy.pi*self._sigv
self._siglz= self._progenitor.rperi()*self._sigv
self._sigjz= 2.*self._progenitor.zmax()/numpy.pi*self._sigv
#Estimate the frequency covariance matrix from a diagonal J matrix x dOdJ
self._sigjmatrix= numpy.diag([self._sigjr**2.,
self._siglz**2.,
self._sigjz**2.])
self._sigomatrix= numpy.dot(self._dOdJp,
numpy.dot(self._sigjmatrix,self._dOdJp.T))
#Estimate angle spread as the ratio of the largest to the middle eigenvalue
self._sigomatrixEig= numpy.linalg.eig(self._sigomatrix)
self._sigomatrixEigsortIndx= numpy.argsort(self._sigomatrixEig[0])
self._sortedSigOEig= sorted(self._sigomatrixEig[0])
if sigangle is None:
self._sigangle= self._sigv*1.8
else:
self._sigangle= sigangle
self._sigangle2= self._sigangle**2.
self._lnsigangle= numpy.log(self._sigangle)
#Estimate the frequency mean as lying along the direction of the largest eigenvalue
self._dsigomeanProgDirection= self._sigomatrixEig[1][:,numpy.argmax(self._sigomatrixEig[0])]
self._progenitor_Omega_along_dOmega= \
numpy.dot(self._progenitor_Omega,self._dsigomeanProgDirection)
#Make sure we are modeling the correct part of the stream
self._leading= leading
self._sigMeanSign= 1.
if self._leading and self._progenitor_Omega_along_dOmega < 0.:
self._sigMeanSign= -1.
elif not self._leading and self._progenitor_Omega_along_dOmega > 0.:
self._sigMeanSign= -1.
self._progenitor_Omega_along_dOmega*= self._sigMeanSign
self._sigomean= self._progenitor_Omega\
+self._sigMeanOffset*self._sigMeanSign\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))\
*self._dsigomeanProgDirection
#numpy.dot(self._dOdJp,
# numpy.array([self._sigjr,self._siglz,self._sigjz]))
self._dsigomeanProg= self._sigomean-self._progenitor_Omega
self._meandO= self._sigMeanOffset\
*numpy.sqrt(numpy.amax(self._sigomatrixEig[0]))
#Store cholesky of sigomatrix for fast evaluation
self._sigomatrixNorm=\
numpy.sqrt(numpy.sum(self._sigomatrix**2.))
self._sigomatrixinv, self._sigomatrixLogdet= \
fast_cholesky_invert(self._sigomatrix/self._sigomatrixNorm,
tiny=10.**-15.,logdet=True)
self._sigomatrixinv/= self._sigomatrixNorm
deltaAngleTrackLim = (self._sigMeanOffset+4.) * numpy.sqrt(
self._sortedSigOEig[2]) * self._tdisrupt
if (deltaAngleTrack is None):
deltaAngleTrack = deltaAngleTrackLim
else:
if (deltaAngleTrack > deltaAngleTrackLim):
warnings.warn("WARNING: angle range large compared to plausible value.", galpyWarning)
self._deltaAngleTrack= deltaAngleTrack
return None
def _setup_coord_transform(self,R0,Zsun,vsun,progenitor,custom_transform):
#Set the coordinate-transformation parameters; check that these do not conflict with those in the progenitor orbit object; need to use the original, since this objects _progenitor has physical turned off
if progenitor._roSet \
and (numpy.fabs(self._ro-progenitor._ro) > 10.**-.8 \
or numpy.fabs(R0-progenitor._ro) > 10.**-8.):
warnings.warn("Warning: progenitor's ro does not agree with streamdf's ro and R0; this may have unexpected consequences when projecting into observables", galpyWarning)
if progenitor._voSet \
and numpy.fabs(self._vo-progenitor._vo) > 10.**-8.:
warnings.warn("Warning: progenitor's vo does not agree with streamdf's vo; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.fabs(Zsun-progenitor._zo) > 10.**-8.:
warnings.warn("Warning: progenitor's zo does not agree with streamdf's Zsun; this may have unexpected consequences when projecting into observables", galpyWarning)
if (progenitor._roSet or progenitor._voSet) \
and numpy.any(numpy.fabs(vsun-numpy.array([0.,self._vo,0.])\
-progenitor._solarmotion) > 10.**-8.):
warnings.warn("Warning: progenitor's solarmotion does not agree with streamdf's vsun (after accounting for vo); this may have unexpected consequences when projecting into observables", galpyWarning)
self._R0= R0
self._Zsun= Zsun
self._vsun= vsun
self._custom_transform= custom_transform
return None
def _setup_progIsTrack(self):
"""If progIsTrack, the progenitor orbit that was passed to the
streamdf initialization is the track at zero angle separation;
this routine computes an actual progenitor position that gives
the desired track given the parameters of the streamdf"""
# We need to flip the sign of the offset, to go to the progenitor
self._sigMeanSign*= -1.
# Use _determine_stream_track_single to calculate the track-progenitor
# offset at zero angle separation
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
0.) #angle = 0
# Setup the new progenitor orbit
progenitor= Orbit(prog_stream_offset[3])
# Flip the offset sign again
self._sigMeanSign*= -1.
# Now re-do the previous setup
self._progenitor_setup(progenitor,self._leading,False)
self._offset_setup(self._sigangle,self._leading,
self._deltaAngleTrack)
return None
@physical_conversion('angle',pop=True)
def misalignment(self,isotropic=False,**kwargs):
"""
NAME:
misalignment
PURPOSE:
calculate the misalignment between the progenitor's frequency
and the direction along which the stream disrupts
INPUT:
isotropic= (False), if True, return the misalignment assuming an isotropic action distribution
OUTPUT:
misalignment in rad
HISTORY:
2013-12-05 - Written - Bovy (IAS)
2017-10-28 - Changed output unit to rad - Bovy (UofT)
"""
warnings.warn("In versions >1.3, the output unit of streamdf.misalignment has been changed to radian (from degree before)",galpyWarning)
if isotropic:
dODir= self._dOdJpEig[1][:,numpy.argmax(numpy.fabs(self._dOdJpEig[0]))]
else:
dODir= self._dsigomeanProgDirection
out= numpy.arccos(numpy.sum(self._progenitor_Omega*dODir)/numpy.sqrt(numpy.sum(self._progenitor_Omega**2.)))
if out > numpy.pi/2.: return out-numpy.pi
else: return out
def freqEigvalRatio(self,isotropic=False):
"""
NAME:
freqEigvalRatio
PURPOSE:
calculate the ratio between the largest and 2nd-to-largest (in abs)
eigenvalue of sqrt(dO/dJ^T V_J dO/dJ)
(if this is big, a 1D stream will form)
INPUT:
isotropic= (False), if True, return the ratio assuming an isotropic action distribution (i.e., just of dO/dJ)
OUTPUT:
ratio between eigenvalues of fabs(dO / dJ)
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
if isotropic:
sortedEig= sorted(numpy.fabs(self._dOdJpEig[0]))
return sortedEig[2]/sortedEig[1]
else:
return numpy.sqrt(self._sortedSigOEig)[2]\
/numpy.sqrt(self._sortedSigOEig)[1]
@physical_conversion('time',pop=True)
def estimateTdisrupt(self,deltaAngle):
"""
NAME:
estimateTdisrupt
PURPOSE:
estimate the time of disruption
INPUT:
deltaAngle- spread in angle since disruption
OUTPUT:
time in natural units
HISTORY:
2013-11-27 - Written - Bovy (IAS)
"""
return deltaAngle\
/numpy.sqrt(numpy.sum(self._dsigomeanProg**2.))
def subhalo_encounters(self,venc=numpy.inf,sigma=150./220.,
nsubhalo=0.3,bmax=0.025,yoon=False):
"""
NAME:
subhalo_encounters
PURPOSE:
estimate the number of encounters with subhalos over the lifetime of this stream, using a formalism similar to that of Yoon et al. (2011)
INPUT:
venc= (numpy.inf) count encounters with (relative) speeds less than this (relative radial velocity in cylindrical stream frame, unless yoon is True) (can be Quantity)
sigma= (150/220) velocity dispersion of the DM subhalo population (can be Quantity)
nsubhalo= (0.3) spatial number density of subhalos (can be Quantity)
bmax= (0.025) maximum impact parameter (if larger than width of stream) (can be Quantity)
yoon= (False) if True, use erroneous Yoon et al. formula
OUTPUT:
number of encounters
HISTORY:
2016-01-19 - Written - Bovy (UofT)
"""
venc= conversion.parse_velocity(venc,vo=self._vo)
sigma= conversion.parse_velocity(sigma,vo=self._vo)
nsubhalo= conversion.parse_numdens(nsubhalo,ro=self._ro)
bmax= conversion.parse_length(bmax,ro=self._ro)
Ravg= numpy.mean(numpy.sqrt(self._progenitor.orbit[0,:,0]**2.
+self._progenitor.orbit[0,:,3]**2.))
if numpy.isinf(venc):
vencFac= 1.
elif yoon:
vencFac= (1.-(1.+venc**2./4./sigma**2.)\
*numpy.exp(-venc**2./4./sigma**2.))
else:
vencFac= (1.-numpy.exp(-venc**2./2./sigma**2.))
if yoon:
yoonFac= 2*numpy.sqrt(2.)
else:
yoonFac= 1.
# Figure out width of stream
w= self.sigangledAngle(self._meandO*self._tdisrupt,simple=True,
use_physical=False)
if bmax < w*Ravg/2.: bmax= w*Ravg/2.
return yoonFac/numpy.sqrt(2.)*numpy.sqrt(numpy.pi)*Ravg*sigma\
*self._tdisrupt**2.*self._meandO\
*bmax*nsubhalo*vencFac
############################STREAM TRACK FUNCTIONS#############################
def plotTrack(self,d1='x',d2='z',interp=True,spread=0,simple=_USESIMPLE,
*args,**kwargs):
"""
NAME:
plotTrack
PURPOSE:
plot the stream track
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
interp= (True) if True, use the interpolated stream track
spread= (0) if int > 0, also plot the spread around the track as spread x sigma
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
simple= (False), if True, use a simple estimate for the spread in perpendicular angle
galpy.util.plot.plotplot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
if not hasattr(self,'_ObsTrackLB') and \
(d1.lower() == 'll' or d1.lower() == 'bb'
or d1.lower() == 'dist' or d1.lower() == 'pmll'
or d1.lower() == 'pmbb' or d1.lower() == 'vlos'
or d2.lower() == 'll' or d2.lower() == 'bb'
or d2.lower() == 'dist' or d2.lower() == 'pmll'
or d2.lower() == 'pmbb' or d2.lower() == 'vlos'):
self.calc_stream_lb()
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_track_dim(d1,interp=interp,phys=phys)
ty= self._parse_track_dim(d2,interp=interp,phys=phys)
plot.plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
if spread:
addx, addy= self._parse_track_spread(d1,d2,interp=interp,phys=phys,
simple=simple)
if ('ls' in kwargs and kwargs['ls'] == 'none') \
or ('linestyle' in kwargs \
and kwargs['linestyle'] == 'none'):
kwargs.pop('ls',None)
kwargs.pop('linestyle',None)
spreadls= 'none'
else:
spreadls= '-.'
spreadmarker= kwargs.pop('marker',None)
spreadcolor= kwargs.pop('color',None)
spreadlw= kwargs.pop('lw',1.)
plot.plot(tx+spread*addx,ty+spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
plot.plot(tx-spread*addx,ty-spread*addy,ls=spreadls,
marker=spreadmarker,color=spreadcolor,
lw=spreadlw,
overplot=True)
return None
def plotProgenitor(self,d1='x',d2='z',*args,**kwargs):
"""
NAME:
plotProgenitor
PURPOSE:
plot the progenitor orbit
INPUT:
d1= plot this on the X axis ('x','y','z','R','phi','vx','vy','vz','vR','vt','ll','bb','dist','pmll','pmbb','vlos')
d2= plot this on the Y axis (same list as for d1)
scaleToPhysical= (False), if True, plot positions in kpc and velocities in km/s
galpy.util.plot.plot args and kwargs
OUTPUT:
plot to output device
HISTORY:
2013-12-09 - Written - Bovy (IAS)
"""
tts= self._progenitor.t[self._progenitor.t \
< self._trackts[self._nTrackChunks-1]]
obs= [self._R0,0.,self._Zsun]
obs.extend(self._vsun)
phys= kwargs.pop('scaleToPhysical',False)
tx= self._parse_progenitor_dim(d1,tts,ro=self._ro,vo=self._vo,
obs=obs,phys=phys)
ty= self._parse_progenitor_dim(d2,tts,ro=self._ro,vo=self._vo,
obs=obs,phys=phys)
plot.plot(tx,ty,*args,
xlabel=_labelDict[d1.lower()],
ylabel=_labelDict[d2.lower()],
**kwargs)
return None
def _parse_track_dim(self,d1,interp=True,phys=False):
"""Parse the dimension to plot the stream track for"""
if interp: interpStr= 'interpolated'
else: interpStr= ''
if d1.lower() == 'x':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,0]
elif d1.lower() == 'y':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,1]
elif d1.lower() == 'z':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,2]
elif d1.lower() == 'r':
tx= self.__dict__['_%sObsTrack' % interpStr][:,0]
elif d1.lower() == 'phi':
tx= self.__dict__['_%sObsTrack' % interpStr][:,5]
elif d1.lower() == 'vx':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,3]
elif d1.lower() == 'vy':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,4]
elif d1.lower() == 'vz':
tx= self.__dict__['_%sObsTrackXY' % interpStr][:,5]
elif d1.lower() == 'vr':
tx= self.__dict__['_%sObsTrack' % interpStr][:,1]
elif d1.lower() == 'vt':
tx= self.__dict__['_%sObsTrack' % interpStr][:,2]
elif d1.lower() == 'll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,0]
elif d1.lower() == 'bb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,1]
elif d1.lower() == 'dist':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,2]
elif d1.lower() == 'pmll':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,4]
elif d1.lower() == 'pmbb':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,5]
elif d1.lower() == 'vlos':
tx= self.__dict__['_%sObsTrackLB' % interpStr][:,3]
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._ro
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._vo
return tx
def _parse_progenitor_dim(self,d1,ts,ro=None,vo=None,obs=None,
phys=False):
"""Parse the dimension to plot the progenitor orbit for"""
if d1.lower() == 'x':
tx= self._progenitor.x(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'y':
tx= self._progenitor.y(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'z':
tx= self._progenitor.z(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'r':
tx= self._progenitor.R(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'phi':
tx= self._progenitor.phi(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vx':
tx= self._progenitor.vx(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vy':
tx= self._progenitor.vy(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vz':
tx= self._progenitor.vz(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vr':
tx= self._progenitor.vR(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'vt':
tx= self._progenitor.vT(ts,ro=ro,vo=vo,obs=obs,use_physical=False)
elif d1.lower() == 'll':
tx= self._progenitor.ll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'bb':
tx= self._progenitor.bb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'dist':
tx= self._progenitor.dist(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmll':
tx= self._progenitor.pmll(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'pmbb':
tx= self._progenitor.pmbb(ts,ro=ro,vo=vo,obs=obs)
elif d1.lower() == 'vlos':
tx= self._progenitor.vlos(ts,ro=ro,vo=vo,obs=obs)
if phys and (d1.lower() == 'x' or d1.lower() == 'y' \
or d1.lower() == 'z' or d1.lower() == 'r'):
tx= copy.copy(tx)
tx*= self._ro
if phys and (d1.lower() == 'vx' or d1.lower() == 'vy' \
or d1.lower() == 'vz' or d1.lower() == 'vr' \
or d1.lower() == 'vt'):
tx= copy.copy(tx)
tx*= self._vo
return tx
def _parse_track_spread(self,d1,d2,interp=True,phys=False,
simple=_USESIMPLE):
"""Determine the spread around the track"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
okaySpreadR= ['r','vr','vt','z','vz','phi']
okaySpreadXY= ['x','y','z','vx','vy','vz']
okaySpreadLB= ['ll','bb','dist','vlos','pmll','pmbb']
#Determine which coordinate system we're in
coord= [False,False,False] #R, XY, LB
if d1.lower() in okaySpreadR and d2.lower() in okaySpreadR:
coord[0]= True
elif d1.lower() in okaySpreadXY and d2.lower() in okaySpreadXY:
coord[1]= True
elif d1.lower() in okaySpreadLB and d2.lower() in okaySpreadLB:
coord[2]= True
else:
raise NotImplementedError("plotting the spread for coordinates from different systems not implemented yet ...")
#Get the right 2D Jacobian
indxDict= {}
indxDict['r']= 0
indxDict['vr']= 1
indxDict['vt']= 2
indxDict['z']= 3
indxDict['vz']= 4
indxDict['phi']= 5
indxDictXY= {}
indxDictXY['x']= 0
indxDictXY['y']= 1
indxDictXY['z']= 2
indxDictXY['vx']= 3
indxDictXY['vy']= 4
indxDictXY['vz']= 5
indxDictLB= {}
indxDictLB['ll']= 0
indxDictLB['bb']= 1
indxDictLB['dist']= 2
indxDictLB['vlos']= 3
indxDictLB['pmll']= 4
indxDictLB['pmbb']= 5
if coord[0]:
relevantCov= self._allErrCovs
relevantDict= indxDict
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._ro,self._vo,self._vo,
self._ro,self._vo,1.])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[1]:
relevantCov= self._allErrCovsXY
relevantDict= indxDictXY
if phys:#apply scale factors
tcov= copy.copy(relevantCov)
scaleFac= numpy.array([self._ro,self._ro,self._ro,
self._vo,self._vo,self._vo])
tcov*= numpy.tile(scaleFac,(6,1))
tcov*= numpy.tile(scaleFac,(6,1)).T
relevantCov= tcov
elif coord[2]:
relevantCov= self._allErrCovsLBUnscaled
relevantDict= indxDictLB
indx0= numpy.array([[relevantDict[d1.lower()],relevantDict[d1.lower()]],
[relevantDict[d2.lower()],relevantDict[d2.lower()]]])
indx1= numpy.array([[relevantDict[d1.lower()],relevantDict[d2.lower()]],
[relevantDict[d1.lower()],relevantDict[d2.lower()]]])
cov= relevantCov[:,indx0,indx1] #cov contains all nTrackChunks covs
if not interp:
out= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
out[ii]= minEigvec*numpy.sqrt(covEig[0][minIndx])
eigDir= minEigvec
else:
#We slerp the minor eigenvector and interpolate the eigenvalue
#First store all of the eigenvectors on the track
allEigval= numpy.empty(self._nTrackChunks)
allEigvec= numpy.empty((self._nTrackChunks,2))
eigDir= numpy.array([1.,0.])
for ii in range(self._nTrackChunks):
covEig= numpy.linalg.eig(cov[ii])
minIndx= numpy.argmin(covEig[0])
minEigvec= covEig[1][:,minIndx] #this is the direction of the transverse spread
if numpy.sum(minEigvec*eigDir) < 0.: minEigvec*= -1. #Keep them pointing in the same direction
allEigval[ii]= numpy.sqrt(covEig[0][minIndx])
allEigvec[ii]= minEigvec
eigDir= minEigvec
#Now interpolate where needed
interpEigval=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allEigval,k=3)
interpolatedEigval= interpEigval(self._interpolatedThetasTrack)
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
2))
for ii in range(self._nTrackChunks-1):
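                # slerp between consecutive unit eigenvectors: weights
                # sin((1-t)*w)/sin(w) and sin(t*w)/sin(w), with w the angle
                # between them, keep the interpolated vector at unit length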
slerpOmega= numpy.arccos(numpy.sum(allEigvec[ii]*allEigvec[ii+1]))
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(2):
interpolatedEigvec[slerpIndx,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmega)*allEigvec[ii,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmega)*allEigvec[ii+1,jj])/numpy.sin(slerpOmega)
out= numpy.tile(interpolatedEigval.T,(2,1)).T*interpolatedEigvec
if coord[2]: #if LB, undo rescalings that were applied before
out[:,0]*= self._ErrCovsLBScale[relevantDict[d1.lower()]]
out[:,1]*= self._ErrCovsLBScale[relevantDict[d2.lower()]]
return (out[:,0],out[:,1])
def plotCompareTrackAAModel(self,**kwargs):
"""
NAME:
plotCompareTrackAAModel
PURPOSE:
plot the comparison between the underlying model's dOmega_perp vs. dangle_r (line) and the track in (x,v)'s dOmega_perp vs. dangle_r (dots; explicitly calculating the track's action-angle coordinates)
INPUT:
galpy.util.plot.plot kwargs
OUTPUT:
plot
HISTORY:
2014-08-27 - Written - Bovy (IAS)
"""
#First calculate the model
model_adiff= (self._ObsTrackAA[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
model_operp= numpy.dot(self._ObsTrackAA[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
#Then calculate the track's frequency-angle coordinates
if self._multi is None:
aatrack= numpy.empty((self._nTrackChunks,6))
for ii in range(self._nTrackChunks):
aatrack[ii]= self._aA.actionsFreqsAngles(Orbit(self._ObsTrack[ii,:]),
use_physical=False)[3:]
else:
aatrack= numpy.reshape(\
multi.parallel_map(
(lambda x: self._aA.actionsFreqsAngles(Orbit(self._ObsTrack[x,:]),use_physical=False)[3:]),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi])),(self._nTrackChunks,6))
track_adiff= (aatrack[:,3:]-self._progenitor_angle)[:,0]\
*self._sigMeanSign
track_operp= numpy.dot(aatrack[:,:3]-self._progenitor_Omega,
self._dsigomeanProgDirection)\
*self._sigMeanSign
overplot= kwargs.pop('overplot',False)
yrange= kwargs.pop('yrange',
[0.,numpy.amax(numpy.hstack((model_operp,track_operp)))*1.1])
xlabel= kwargs.pop('xlabel',r'$\Delta \theta_R$')
ylabel= kwargs.pop('ylabel',r'$\Delta \Omega_\parallel$')
plot.plot(model_adiff,model_operp,'k-',overplot=overplot,
xlabel=xlabel,ylabel=ylabel,yrange=yrange,**kwargs)
plot.plot(track_adiff,track_operp,'ko',overplot=True,
**kwargs)
return None
def _determine_nTrackIterations(self,nTrackIterations):
"""Determine a good value for nTrackIterations based on the misalignment between stream and orbit; just based on some rough experience for now"""
if not nTrackIterations is None:
self.nTrackIterations= nTrackIterations
return None
if numpy.fabs(self.misalignment(quantity=False)) < 1./180.*numpy.pi:
self.nTrackIterations= 0
elif numpy.fabs(self.misalignment(quantity=False)) >= 1./180.*numpy.pi \
and numpy.fabs(self.misalignment(quantity=False)) < 3./180.*numpy.pi:
self.nTrackIterations= 1
elif numpy.fabs(self.misalignment(quantity=False)) >= 3./180.*numpy.pi:
self.nTrackIterations= 2
return None
def _determine_stream_track(self,nTrackChunks):
"""Determine the track of the stream in real space"""
#Determine how much orbital time is necessary for the progenitor's orbit to cover the stream
if nTrackChunks is None:
#default is floor(self._deltaAngleTrack/0.15)+1
self._nTrackChunks= int(numpy.floor(self._deltaAngleTrack/0.15))+1
else:
self._nTrackChunks= nTrackChunks
if self._nTrackChunks < 4: self._nTrackChunks= 4
if not hasattr(self,'nInterpolatedTrackChunks'):
self.nInterpolatedTrackChunks= 1001
dt= self._deltaAngleTrack\
/self._progenitor_Omega_along_dOmega
self._trackts= numpy.linspace(0.,2*dt,2*self._nTrackChunks-1) #to be sure that we cover it
if self._useTM:
return self._determine_stream_track_TM()
#Instantiate an auxiliaryTrack, which is an Orbit instance at the mean frequency of the stream, and zero angle separation wrt the progenitor; prog_stream_offset is the offset between this track and the progenitor at zero angle
prog_stream_offset=\
_determine_stream_track_single(self._aA,
self._progenitor,
0., #time = 0
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
0.) #angle = 0
auxiliaryTrack= Orbit(prog_stream_offset[3])
if dt < 0.:
self._trackts= numpy.linspace(0.,-2.*dt,2*self._nTrackChunks-1)
#Flip velocities before integrating
auxiliaryTrack= auxiliaryTrack.flip()
auxiliaryTrack.integrate(self._trackts,self._pot)
if dt < 0.:
#Flip velocities again
auxiliaryTrack.orbit[...,1]= -auxiliaryTrack.orbit[...,1]
auxiliaryTrack.orbit[...,2]= -auxiliaryTrack.orbit[...,2]
auxiliaryTrack.orbit[...,4]= -auxiliaryTrack.orbit[...,4]
#Calculate the actions, frequencies, and angle for this auxiliary orbit
acfs= self._aA.actionsFreqs(auxiliaryTrack(0.),
use_physical=False)
auxiliary_Omega= numpy.array([acfs[3],acfs[4],acfs[5]]).reshape(3\
)
auxiliary_Omega_along_dOmega= \
numpy.dot(auxiliary_Omega,self._dsigomeanProgDirection)
#Now calculate the actions, frequencies, and angles + Jacobian for each chunk
allAcfsTrack= numpy.empty((self._nTrackChunks,9))
alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
self._nTrackChunks)
ObsTrack= numpy.empty((self._nTrackChunks,6))
ObsTrackAA= numpy.empty((self._nTrackChunks,6))
detdOdJps= numpy.empty((self._nTrackChunks))
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
auxiliaryTrack,
self._trackts[ii]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega), #this factor accounts for the difference in frequency between the progenitor and the auxiliary track
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,auxiliaryTrack,
self._trackts[x]*numpy.fabs(self._progenitor_Omega_along_dOmega/auxiliary_Omega_along_dOmega),
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Repeat the track calculation using the previous track, to get closer to it
for nn in range(self.nTrackIterations):
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_single(self._aA,
Orbit(ObsTrack[ii,:]),
0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x:self.meanOmega(x,use_physical=False),
thetasTrack[ii])
allAcfsTrack[ii,:]= multiOut[0]
alljacsTrack[ii,:,:]= multiOut[1]
allinvjacsTrack[ii,:,:]= multiOut[2]
ObsTrack[ii,:]= multiOut[3]
ObsTrackAA[ii,:]= multiOut[4]
detdOdJps[ii]= multiOut[5]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_single(self._aA,Orbit(ObsTrack[x,:]),0.,
self._progenitor_angle,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allAcfsTrack[ii,:]= multiOut[ii][0]
alljacsTrack[ii,:,:]= multiOut[ii][1]
allinvjacsTrack[ii,:,:]= multiOut[ii][2]
ObsTrack[ii,:]= multiOut[ii][3]
ObsTrackAA[ii,:]= multiOut[ii][4]
detdOdJps[ii]= multiOut[ii][5]
#Store the track
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._allAcfsTrack= allAcfsTrack
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
self._calc_ObsTrackXY()
return None
def _calc_ObsTrackXY(self):
#Also calculate _ObsTrackXY in XYZ,vXYZ coordinates
self._ObsTrackXY= numpy.empty_like(self._ObsTrack)
TrackX= self._ObsTrack[:,0]*numpy.cos(self._ObsTrack[:,5])
TrackY= self._ObsTrack[:,0]*numpy.sin(self._ObsTrack[:,5])
TrackZ= self._ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
coords.cyl_to_rect_vec(self._ObsTrack[:,1],
self._ObsTrack[:,2],
self._ObsTrack[:,4],
self._ObsTrack[:,5])
self._ObsTrackXY[:,0]= TrackX
self._ObsTrackXY[:,1]= TrackY
self._ObsTrackXY[:,2]= TrackZ
self._ObsTrackXY[:,3]= TrackvX
self._ObsTrackXY[:,4]= TrackvY
self._ObsTrackXY[:,5]= TrackvZ
return None
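    # The conversion above follows the standard cylindrical -> rectangular
    # convention for the 6D rows of _ObsTrack, (R,vR,vT,z,vz,phi).  A minimal
    # sketch of the same transformation for a single point (hedged example;
    # the numerical values are illustrative and `coords` is the module-level
    # import used throughout this file):
    #
    # >>> R, vR, vT, z, vz, phi= 1., 0.1, 1., 0., 0., 0.5
    # >>> X, Y, Z= R*numpy.cos(phi), R*numpy.sin(phi), z
    # >>> vX, vY, vZ= coords.cyl_to_rect_vec(vR, vT, vz, phi)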
def _determine_stream_track_TM(self):
# With TM, can get the track in a single shot
#Now calculate the actions, frequencies, and angles + Jacobian for each chunk
thetasTrack= numpy.linspace(0.,self._deltaAngleTrack,
self._nTrackChunks)
if self._approxConstTrackFreq:
alljacsTrack, allinvjacsTrack, ObsTrack, ObsTrackAA, detdOdJps= \
_determine_stream_track_TM_approxConstantTrackFreq(\
self._aAT,
numpy.array([self._progenitor_jr,self._progenitor_lz,
self._progenitor_jz]),
self._progenitor_Omega,
self._progenitor_angle,
self._dOdJp,
self._dOdJpInv,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack)
            #Store the track; _allAcfsTrack is not computed in the TM case
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
self._calc_ObsTrackXY()
return None
alljacsTrack= numpy.empty((self._nTrackChunks,6,6))
allinvjacsTrack= numpy.empty((self._nTrackChunks,6,6))
ObsTrack= numpy.empty((self._nTrackChunks,6))
ObsTrackAA= numpy.empty((self._nTrackChunks,6))
detdOdJps= numpy.empty((self._nTrackChunks))
if self._multi is None:
for ii in range(self._nTrackChunks):
multiOut= _determine_stream_track_TM_single(\
self._aAT,
numpy.array([self._progenitor_jr,self._progenitor_lz,
self._progenitor_jz]),
self._progenitor_Omega,
self._progenitor_angle,
self._dOdJp,
self._dOdJpInv,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[ii])
alljacsTrack[ii,:,:]= multiOut[0]
allinvjacsTrack[ii,:,:]= multiOut[1]
ObsTrack[ii,:]= multiOut[2]
ObsTrackAA[ii,:]= multiOut[3]
detdOdJps[ii]= multiOut[4]
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_track_TM_single(\
self._aAT,
numpy.array([self._progenitor_jr,self._progenitor_lz,
self._progenitor_jz]),
self._progenitor_Omega,
self._progenitor_angle,
self._dOdJp,
self._dOdJpInv,
self._sigMeanSign,
self._dsigomeanProgDirection,
lambda x: self.meanOmega(x,use_physical=False),
thetasTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
alljacsTrack[ii,:,:]= multiOut[ii][0]
allinvjacsTrack[ii,:,:]= multiOut[ii][1]
ObsTrack[ii,:]= multiOut[ii][2]
ObsTrackAA[ii,:]= multiOut[ii][3]
detdOdJps[ii]= multiOut[ii][4]
        #Store the track; _allAcfsTrack is not computed in the TM case
self._thetasTrack= thetasTrack
self._ObsTrack= ObsTrack
self._ObsTrackAA= ObsTrackAA
self._alljacsTrack= alljacsTrack
self._allinvjacsTrack= allinvjacsTrack
self._detdOdJps= detdOdJps
self._meandetdOdJp= numpy.mean(self._detdOdJps)
self._logmeandetdOdJp= numpy.log(self._meandetdOdJp)
#Also calculate _ObsTrackXY in XYZ,vXYZ coordinates
self._calc_ObsTrackXY()
return None
def _determine_stream_spread(self,simple=_USESIMPLE):
"""Determine the spread around the stream track, just sets matrices that describe the covariances"""
allErrCovs= numpy.empty((self._nTrackChunks,6,6))
if self._multi is None:
for ii in range(self._nTrackChunks):
allErrCovs[ii]= _determine_stream_spread_single(self._sigomatrixEig,
self._thetasTrack[ii],
lambda x: self.sigOmega(x,use_physical=False),
lambda y: self.sigangledAngle(y,simple=simple,use_physical=False),
self._allinvjacsTrack[ii])
else:
multiOut= multi.parallel_map(\
(lambda x: _determine_stream_spread_single(self._sigomatrixEig,
self._thetasTrack[x],
                                        lambda da: self.sigOmega(da,use_physical=False),
lambda y: self.sigangledAngle(y,simple=simple,use_physical=False),
self._allinvjacsTrack[x])),
range(self._nTrackChunks),
numcores=numpy.amin([self._nTrackChunks,
multiprocessing.cpu_count(),
self._multi]))
for ii in range(self._nTrackChunks):
allErrCovs[ii]= multiOut[ii]
self._allErrCovs= allErrCovs
#Also propagate to XYZ coordinates
allErrCovsXY= numpy.empty_like(self._allErrCovs)
allErrCovsEigvalXY= numpy.empty((len(self._thetasTrack),6))
allErrCovsEigvecXY= numpy.empty_like(self._allErrCovs)
eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)])
for ii in range(self._nTrackChunks):
tjac= coords.cyl_to_rect_jac(*self._ObsTrack[ii])
allErrCovsXY[ii]=\
numpy.dot(tjac,numpy.dot(self._allErrCovs[ii],tjac.T))
#Eigen decomposition for interpolation
teig= numpy.linalg.eig(allErrCovsXY[ii])
#Sort them to match them up later
sortIndx= numpy.argsort(teig[0])
allErrCovsEigvalXY[ii]= teig[0][sortIndx]
#Make sure the eigenvectors point in the same direction
for jj in range(6):
if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.:
teig[1][:,sortIndx[jj]]*= -1.
eigDir[jj]= teig[1][:,sortIndx[jj]]
allErrCovsEigvecXY[ii]= teig[1][:,sortIndx]
self._allErrCovsXY= allErrCovsXY
#Interpolate the allErrCovsXY covariance matrices along the interpolated track
#Interpolate the eigenvalues
interpAllErrCovsEigvalXY=\
[interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allErrCovsEigvalXY[:,ii],
k=3) for ii in range(6)]
#Now build the interpolated allErrCovsXY using slerp
interpolatedAllErrCovsXY= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
interpolatedEigval=\
numpy.array([interpAllErrCovsEigvalXY[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
for ii in range(self._nTrackChunks-1):
slerpOmegas=\
[numpy.arccos(numpy.sum(allErrCovsEigvecXY[ii,:,jj]*allErrCovsEigvecXY[ii+1,:,jj])) for jj in range(6)]
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(6):
for kk in range(6):
interpolatedEigvec[slerpIndx,kk,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecXY[ii,kk,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecXY[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj])
for ii in range(len(self._interpolatedThetasTrack)):
interpolatedAllErrCovsXY[ii]=\
numpy.dot(interpolatedEigvec[ii],
numpy.dot(numpy.diag(interpolatedEigval[:,ii]),
interpolatedEigvec[ii].T))
self._interpolatedAllErrCovsXY= interpolatedAllErrCovsXY
#Also interpolate in l and b coordinates
self._determine_stream_spreadLB(simple=simple)
return None
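    # The eigenvector interpolation above is a spherical linear interpolation
    # (slerp): for unit vectors e0 and e1 separated by angle Om, the
    # interpolant at fraction s is
    #   slerp(e0,e1;s)= (sin((1-s)*Om)*e0+sin(s*Om)*e1)/sin(Om),
    # which remains a unit vector, unlike direct linear interpolation.
    # Minimal standalone sketch (illustrative, not part of the class API):
    #
    # >>> e0= numpy.array([1.,0.]); e1= numpy.array([0.,1.]); s= 0.5
    # >>> Om= numpy.arccos(numpy.sum(e0*e1))
    # >>> (numpy.sin((1.-s)*Om)*e0+numpy.sin(s*Om)*e1)/numpy.sin(Om)
    # array([0.70710678, 0.70710678])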
def _determine_stream_spreadLB(self,simple=_USESIMPLE,
ro=None,vo=None,
R0=None,Zsun=None,vsun=None):
"""Determine the spread in the stream in observable coordinates"""
if not hasattr(self,'_allErrCovs'):
self._determine_stream_spread(simple=simple)
if ro is None:
ro= self._ro
if vo is None:
vo= self._vo
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
allErrCovsLB= numpy.empty_like(self._allErrCovs)
obs= [R0,0.,Zsun]
obs.extend(vsun)
obskwargs= {}
obskwargs['ro']= ro
obskwargs['vo']= vo
obskwargs['obs']= obs
obskwargs['quantity']= False
self._ErrCovsLBScale= [180.,90.,
self._progenitor.dist(**obskwargs),
numpy.fabs(self._progenitor.vlos(**obskwargs)),
numpy.sqrt(self._progenitor.pmll(**obskwargs)**2.
+self._progenitor.pmbb(**obskwargs)**2.),
numpy.sqrt(self._progenitor.pmll(**obskwargs)**2.
+self._progenitor.pmbb(**obskwargs)**2.)]
allErrCovsEigvalLB= numpy.empty((len(self._thetasTrack),6))
allErrCovsEigvecLB= numpy.empty_like(self._allErrCovs)
eigDir= numpy.array([numpy.array([1.,0.,0.,0.,0.,0.]) for ii in range(6)])
for ii in range(self._nTrackChunks):
tjacXY= coords.galcenrect_to_XYZ_jac(*self._ObsTrackXY[ii])
tjacLB= coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii],
degree=True)
tjacLB[:3,:]/= ro
tjacLB[3:,:]/= vo
for jj in range(6):
tjacLB[:,jj]*= self._ErrCovsLBScale[jj]
tjac= numpy.dot(numpy.linalg.inv(tjacLB),tjacXY)
allErrCovsLB[ii]=\
numpy.dot(tjac,numpy.dot(self._allErrCovsXY[ii],tjac.T))
#Eigen decomposition for interpolation
teig= numpy.linalg.eig(allErrCovsLB[ii])
#Sort them to match them up later
sortIndx= numpy.argsort(teig[0])
allErrCovsEigvalLB[ii]= teig[0][sortIndx]
#Make sure the eigenvectors point in the same direction
for jj in range(6):
if numpy.sum(eigDir[jj]*teig[1][:,sortIndx[jj]]) < 0.:
teig[1][:,sortIndx[jj]]*= -1.
eigDir[jj]= teig[1][:,sortIndx[jj]]
allErrCovsEigvecLB[ii]= teig[1][:,sortIndx]
self._allErrCovsLBUnscaled= allErrCovsLB
#Interpolate the allErrCovsLB covariance matrices along the interpolated track
#Interpolate the eigenvalues
interpAllErrCovsEigvalLB=\
[interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
allErrCovsEigvalLB[:,ii],
k=3) for ii in range(6)]
        #Now build the interpolated allErrCovsLB using slerp
interpolatedAllErrCovsLB= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
interpolatedEigval=\
numpy.array([interpAllErrCovsEigvalLB[ii](self._interpolatedThetasTrack) for ii in range(6)]) #6,ninterp
#Interpolate in chunks
interpolatedEigvec= numpy.empty((len(self._interpolatedThetasTrack),
6,6))
for ii in range(self._nTrackChunks-1):
slerpOmegas=\
[numpy.arccos(numpy.sum(allErrCovsEigvecLB[ii,:,jj]*allErrCovsEigvecLB[ii+1,:,jj])) for jj in range(6)]
slerpts= (self._interpolatedThetasTrack-self._thetasTrack[ii])/\
(self._thetasTrack[ii+1]-self._thetasTrack[ii])
slerpIndx= (slerpts >= 0.)*(slerpts <= 1.)
for jj in range(6):
for kk in range(6):
interpolatedEigvec[slerpIndx,kk,jj]=\
(numpy.sin((1-slerpts[slerpIndx])*slerpOmegas[jj])*allErrCovsEigvecLB[ii,kk,jj]
+numpy.sin(slerpts[slerpIndx]*slerpOmegas[jj])*allErrCovsEigvecLB[ii+1,kk,jj])/numpy.sin(slerpOmegas[jj])
for ii in range(len(self._interpolatedThetasTrack)):
interpolatedAllErrCovsLB[ii]=\
numpy.dot(interpolatedEigvec[ii],
numpy.dot(numpy.diag(interpolatedEigval[:,ii]),
interpolatedEigvec[ii].T))
self._interpolatedAllErrCovsLBUnscaled= interpolatedAllErrCovsLB
#Also calculate the (l,b,..) -> (X,Y,..) Jacobian at all of the interpolated and not interpolated points
trackLogDetJacLB= numpy.empty_like(self._thetasTrack)
interpolatedTrackLogDetJacLB=\
numpy.empty_like(self._interpolatedThetasTrack)
for ii in range(self._nTrackChunks):
tjacLB= coords.lbd_to_XYZ_jac(*self._ObsTrackLB[ii],
degree=True)
trackLogDetJacLB[ii]= numpy.log(numpy.linalg.det(tjacLB))
self._trackLogDetJacLB= trackLogDetJacLB
for ii in range(len(self._interpolatedThetasTrack)):
tjacLB=\
coords.lbd_to_XYZ_jac(*self._interpolatedObsTrackLB[ii],
degree=True)
interpolatedTrackLogDetJacLB[ii]=\
numpy.log(numpy.linalg.det(tjacLB))
self._interpolatedTrackLogDetJacLB= interpolatedTrackLogDetJacLB
return None
def _interpolate_stream_track(self):
"""Build interpolations of the stream track"""
if hasattr(self,'_interpolatedThetasTrack'):
return None #Already did this
TrackX= self._ObsTrack[:,0]*numpy.cos(self._ObsTrack[:,5])
TrackY= self._ObsTrack[:,0]*numpy.sin(self._ObsTrack[:,5])
TrackZ= self._ObsTrack[:,3]
TrackvX, TrackvY, TrackvZ=\
coords.cyl_to_rect_vec(self._ObsTrack[:,1],
self._ObsTrack[:,2],
self._ObsTrack[:,4],
self._ObsTrack[:,5])
#Interpolate
self._interpTrackX=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackX,k=3)
self._interpTrackY=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackY,k=3)
self._interpTrackZ=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackZ,k=3)
self._interpTrackvX=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvX,k=3)
self._interpTrackvY=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvY,k=3)
self._interpTrackvZ=\
interpolate.InterpolatedUnivariateSpline(self._thetasTrack,
TrackvZ,k=3)
#Now store an interpolated version of the stream track
self._interpolatedThetasTrack=\
numpy.linspace(0.,self._deltaAngleTrack,
self.nInterpolatedTrackChunks)
self._interpolatedObsTrackXY= numpy.empty((len(self._interpolatedThetasTrack),6))
self._interpolatedObsTrackXY[:,0]=\
self._interpTrackX(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,1]=\
self._interpTrackY(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,2]=\
self._interpTrackZ(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,3]=\
self._interpTrackvX(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,4]=\
self._interpTrackvY(self._interpolatedThetasTrack)
self._interpolatedObsTrackXY[:,5]=\
self._interpTrackvZ(self._interpolatedThetasTrack)
#Also in cylindrical coordinates
self._interpolatedObsTrack= \
numpy.empty((len(self._interpolatedThetasTrack),6))
tR,tphi,tZ= coords.rect_to_cyl(self._interpolatedObsTrackXY[:,0],
self._interpolatedObsTrackXY[:,1],
self._interpolatedObsTrackXY[:,2])
tvR,tvT,tvZ=\
coords.rect_to_cyl_vec(self._interpolatedObsTrackXY[:,3],
self._interpolatedObsTrackXY[:,4],
self._interpolatedObsTrackXY[:,5],
tR,tphi,tZ,cyl=True)
self._interpolatedObsTrack[:,0]= tR
self._interpolatedObsTrack[:,1]= tvR
self._interpolatedObsTrack[:,2]= tvT
self._interpolatedObsTrack[:,3]= tZ
self._interpolatedObsTrack[:,4]= tvZ
self._interpolatedObsTrack[:,5]= tphi
return None
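    # Hedged usage sketch (assumes a fully set-up instance `sdf` of this
    # class; the angle value is illustrative): once the splines above exist,
    # the track can be evaluated at any parallel angle, e.g.
    #
    # >>> apar= 0.3                    # parallel angle along the stream [rad]
    # >>> X= sdf._interpTrackX(apar)   # Galactocentric X at that angle
    # >>> vX= sdf._interpTrackvX(apar)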
def _interpolate_stream_track_aA(self):
"""Build interpolations of the stream track in action-angle coordinates"""
if hasattr(self,'_interpolatedObsTrackAA'):
return None #Already did this
#Calculate 1D meanOmega on a fine grid in angle and interpolate
if not hasattr(self,'_interpolatedThetasTrack'):
self._interpolate_stream_track()
dmOs= numpy.array([self.meanOmega(da,oned=True,use_physical=False)
for da in self._interpolatedThetasTrack])
self._interpTrackAAdmeanOmegaOneD=\
interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,dmOs,k=3)
#Build the interpolated AA
self._interpolatedObsTrackAA=\
numpy.empty((len(self._interpolatedThetasTrack),6))
for ii in range(len(self._interpolatedThetasTrack)):
self._interpolatedObsTrackAA[ii,:3]=\
self._progenitor_Omega+dmOs[ii]*self._dsigomeanProgDirection\
*self._sigMeanSign
self._interpolatedObsTrackAA[ii,3:]=\
self._progenitor_angle+self._interpolatedThetasTrack[ii]\
*self._dsigomeanProgDirection*self._sigMeanSign
self._interpolatedObsTrackAA[ii,3:]=\
numpy.mod(self._interpolatedObsTrackAA[ii,3:],2.*numpy.pi)
return None
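    # In frequency-angle space the interpolated track has the simple form
    #   Omega(apar)= Omega_prog+meanOmega_1D(apar)*ehat*sign,
    #   angle(apar)= (angle_prog+apar*ehat*sign) mod 2 pi,
    # with ehat= self._dsigomeanProgDirection and sign= self._sigMeanSign;
    # this is exactly what the loop above stores row by row in
    # _interpolatedObsTrackAA.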
def calc_stream_lb(self,
vo=None,ro=None,
R0=None,Zsun=None,vsun=None):
"""
NAME:
calc_stream_lb
PURPOSE:
convert the stream track to observational coordinates and store
INPUT:
Coordinate transformation inputs (all default to the instance-wide
values):
vo= circular velocity to normalize velocities with
ro= Galactocentric radius to normalize positions with
R0= Galactocentric radius of the Sun (kpc)
Zsun= Sun's height above the plane (kpc)
vsun= Sun's motion in cylindrical coordinates (vR positive away from center)
OUTPUT:
(none)
HISTORY:
2013-12-02 - Written - Bovy (IAS)
"""
if vo is None:
vo= self._vo
if ro is None:
ro= self._ro
if R0 is None:
R0= self._R0
if Zsun is None:
Zsun= self._Zsun
if vsun is None:
vsun= self._vsun
self._ObsTrackLB= numpy.empty_like(self._ObsTrack)
XYZ= coords.galcencyl_to_XYZ(self._ObsTrack[:,0]*ro,
self._ObsTrack[:,5],
self._ObsTrack[:,3]*ro,
Xsun=R0,Zsun=Zsun).T
vXYZ= coords.galcencyl_to_vxvyvz(self._ObsTrack[:,1]*vo,
self._ObsTrack[:,2]*vo,
self._ObsTrack[:,4]*vo,
self._ObsTrack[:,5],
vsun=vsun,Xsun=R0,Zsun=Zsun).T
slbd=coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],slbd[:,2],
degree=True)
self._ObsTrackLB[:,0]= slbd[:,0]
self._ObsTrackLB[:,1]= slbd[:,1]
self._ObsTrackLB[:,2]= slbd[:,2]
self._ObsTrackLB[:,3]= svlbd[:,0]
self._ObsTrackLB[:,4]= svlbd[:,1]
self._ObsTrackLB[:,5]= svlbd[:,2]
if hasattr(self,'_interpolatedObsTrackXY'):
#Do the same for the interpolated track
self._interpolatedObsTrackLB=\
numpy.empty_like(self._interpolatedObsTrackXY)
XYZ=\
coords.galcenrect_to_XYZ(\
self._interpolatedObsTrackXY[:,0]*ro,
self._interpolatedObsTrackXY[:,1]*ro,
self._interpolatedObsTrackXY[:,2]*ro,
Xsun=R0,Zsun=Zsun).T
vXYZ=\
coords.galcenrect_to_vxvyvz(\
self._interpolatedObsTrackXY[:,3]*vo,
self._interpolatedObsTrackXY[:,4]*vo,
self._interpolatedObsTrackXY[:,5]*vo,
vsun=vsun,Xsun=R0,Zsun=Zsun).T
slbd=coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
svlbd= coords.vxvyvz_to_vrpmllpmbb(vXYZ[0],vXYZ[1],vXYZ[2],
slbd[:,0],slbd[:,1],
slbd[:,2],
degree=True)
self._interpolatedObsTrackLB[:,0]= slbd[:,0]
self._interpolatedObsTrackLB[:,1]= slbd[:,1]
self._interpolatedObsTrackLB[:,2]= slbd[:,2]
self._interpolatedObsTrackLB[:,3]= svlbd[:,0]
self._interpolatedObsTrackLB[:,4]= svlbd[:,1]
self._interpolatedObsTrackLB[:,5]= svlbd[:,2]
if hasattr(self,'_allErrCovsLBUnscaled'):
#Re-calculate this
self._determine_stream_spreadLB(simple=_USESIMPLE,
vo=vo,ro=ro,
R0=R0,Zsun=Zsun,vsun=vsun)
return None
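    # Hedged usage sketch (assumes an initialized instance `sdf` with
    # physical scales ro, vo set):
    #
    # >>> sdf.calc_stream_lb()
    # >>> l, b, D= (sdf._ObsTrackLB[:,0],  # Galactic longitude [deg]
    # ...           sdf._ObsTrackLB[:,1],  # Galactic latitude [deg]
    # ...           sdf._ObsTrackLB[:,2])  # distance [kpc]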
def _find_closest_trackpoint(self,R,vR,vT,z,vz,phi,interp=True,xy=False,
usev=False):
"""For backward compatibility"""
return self.find_closest_trackpoint(R,vR,vT,z,vz,phi,
interp=interp,xy=xy,
usev=usev)
def find_closest_trackpoint(self,R,vR,vT,z,vz,phi,interp=True,xy=False,
usev=False):
"""
NAME:
find_closest_trackpoint
PURPOSE:
find the closest point on the stream track to a given point
INPUT:
R,vR,vT,z,vz,phi - phase-space coordinates of the given point
interp= (True), if True, return the index of the interpolated track
            xy= (False) if True, the input is X,Y,Z,vX,vY,vZ in Galactocentric rectangular coordinates; in this case some coordinates may be missing (given as None) and they are then not used in the distance
usev= (False) if True, also use velocities to find the closest point
OUTPUT:
index into the track of the closest track point
HISTORY:
2013-12-04 - Written - Bovy (IAS)
"""
if xy:
X= R
Y= vR
Z= vT
else:
X= R*numpy.cos(phi)
Y= R*numpy.sin(phi)
Z= z
if xy and usev:
vX= z
vY= vz
vZ= phi
elif usev:
vX= vR*numpy.cos(phi)-vT*numpy.sin(phi)
vY= vR*numpy.sin(phi)+vT*numpy.cos(phi)
vZ= vz
        present= [X is not None,Y is not None,Z is not None]
        if usev: present.extend([vX is not None,vY is not None,vZ is not None])
present= numpy.array(present,dtype='float')
if X is None: X= 0.
if Y is None: Y= 0.
if Z is None: Z= 0.
if usev and vX is None: vX= 0.
if usev and vY is None: vY= 0.
if usev and vZ is None: vZ= 0.
if interp:
dist2= present[0]*(X-self._interpolatedObsTrackXY[:,0])**2.\
+present[1]*(Y-self._interpolatedObsTrackXY[:,1])**2.\
+present[2]*(Z-self._interpolatedObsTrackXY[:,2])**2.
if usev:
dist2+= present[3]*(vX-self._interpolatedObsTrackXY[:,3])**2.\
+present[4]*(vY-self._interpolatedObsTrackXY[:,4])**2.\
+present[5]*(vZ-self._interpolatedObsTrackXY[:,5])**2.
else:
dist2= present[0]*(X-self._ObsTrackXY[:,0])**2.\
+present[1]*(Y-self._ObsTrackXY[:,1])**2.\
+present[2]*(Z-self._ObsTrackXY[:,2])**2.
if usev:
dist2+= present[3]*(vX-self._ObsTrackXY[:,3])**2.\
+present[4]*(vY-self._ObsTrackXY[:,4])**2.\
+present[5]*(vZ-self._ObsTrackXY[:,5])**2.
return numpy.argmin(dist2)
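    # Hedged usage sketch (values illustrative): find the interpolated track
    # point closest to a phase-space position
    #
    # >>> indx= sdf.find_closest_trackpoint(1.,0.,1.,0.1,0.,0.2,interp=True)
    # >>> sdf._interpolatedObsTrackXY[indx]  # closest 6D track point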
def _find_closest_trackpointLB(self,l,b,D,vlos,pmll,pmbb,interp=True,
usev=False):
return self.find_closest_trackpointLB(l,b,D,vlos,pmll,pmbb,
interp=interp,
usev=usev)
def find_closest_trackpointLB(self,l,b,D,vlos,pmll,pmbb,interp=True,
usev=False):
"""
NAME:
find_closest_trackpointLB
PURPOSE:
find the closest point on the stream track to a given point in (l,b,...) coordinates
INPUT:
            l,b,D,vlos,pmll,pmbb - coordinates in (deg,deg,kpc,km/s,mas/yr,mas/yr)
interp= (True) if True, return the closest index on the interpolated track
usev= (False) if True, also use the velocity components (default is to only use the positions)
OUTPUT:
index of closest track point on the interpolated or not-interpolated track
HISTORY:
            2013-12-17 - Written - Bovy (IAS)
"""
if interp:
nTrackPoints= len(self._interpolatedThetasTrack)
else:
nTrackPoints= len(self._thetasTrack)
if l is None:
l= 0.
trackL= numpy.zeros(nTrackPoints)
elif interp:
trackL= self._interpolatedObsTrackLB[:,0]
else:
trackL= self._ObsTrackLB[:,0]
if b is None:
b= 0.
trackB= numpy.zeros(nTrackPoints)
elif interp:
trackB= self._interpolatedObsTrackLB[:,1]
else:
trackB= self._ObsTrackLB[:,1]
if D is None:
D= 1.
trackD= numpy.ones(nTrackPoints)
elif interp:
trackD= self._interpolatedObsTrackLB[:,2]
else:
trackD= self._ObsTrackLB[:,2]
if usev:
if vlos is None:
vlos= 0.
trackVlos= numpy.zeros(nTrackPoints)
elif interp:
trackVlos= self._interpolatedObsTrackLB[:,3]
else:
trackVlos= self._ObsTrackLB[:,3]
if pmll is None:
pmll= 0.
trackPmll= numpy.zeros(nTrackPoints)
elif interp:
trackPmll= self._interpolatedObsTrackLB[:,4]
else:
trackPmll= self._ObsTrackLB[:,4]
if pmbb is None:
pmbb= 0.
trackPmbb= numpy.zeros(nTrackPoints)
elif interp:
trackPmbb= self._interpolatedObsTrackLB[:,5]
else:
trackPmbb= self._ObsTrackLB[:,5]
#Calculate rectangular coordinates
XYZ= coords.lbd_to_XYZ(l,b,D,degree=True)
trackXYZ= coords.lbd_to_XYZ(trackL,trackB,trackD,degree=True)
if usev:
vxvyvz= coords.vrpmllpmbb_to_vxvyvz(vlos,pmll,pmbb,
XYZ[0],XYZ[1],XYZ[2],
XYZ=True)
trackvxvyvz= coords.vrpmllpmbb_to_vxvyvz(trackVlos,trackPmll,
trackPmbb,
trackXYZ[:,0],
trackXYZ[:,1],
trackXYZ[:,2],
XYZ=True)
#Calculate distance
dist2= (XYZ[0]-trackXYZ[:,0])**2.\
+(XYZ[1]-trackXYZ[:,1])**2.\
+(XYZ[2]-trackXYZ[:,2])**2.
if usev:
dist2+= (vxvyvz[0]-trackvxvyvz[:,0])**2.\
+(vxvyvz[1]-trackvxvyvz[:,1])**2.\
+(vxvyvz[2]-trackvxvyvz[:,2])**2.
return numpy.argmin(dist2)
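    # Hedged usage sketch: missing components may be passed as None and are
    # then ignored in the distance; here only the sky position is used
    # (numbers illustrative)
    #
    # >>> indx= sdf.find_closest_trackpointLB(210.,45.,None,None,None,None,
    # ...                                     interp=True,usev=False)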
def _find_closest_trackpointaA(self,Or,Op,Oz,ar,ap,az,interp=True):
"""
NAME:
_find_closest_trackpointaA
PURPOSE:
find the closest point on the stream track to a given point in
frequency-angle coordinates
INPUT:
Or,Op,Oz,ar,ap,az - phase-space coordinates of the given point
interp= (True), if True, return the index of the interpolated track
OUTPUT:
index into the track of the closest track point
HISTORY:
2013-12-22 - Written - Bovy (IAS)
"""
#Calculate angle offset along the stream parallel to the stream track,
# finding first the angle among a few wraps where the point is
# closest to the parallel track and then the closest trackpoint to that
# point
da= numpy.stack(\
numpy.meshgrid(_TWOPIWRAPS+ar-self._progenitor_angle[0],
_TWOPIWRAPS+ap-self._progenitor_angle[1],
_TWOPIWRAPS+az-self._progenitor_angle[2],
indexing='xy')).T.reshape((len(_TWOPIWRAPS)**3,3))
dapar= self._sigMeanSign*numpy.dot(da[numpy.argmin(numpy.linalg.norm(\
numpy.cross(da,self._dsigomeanProgDirection),axis=1))],
self._dsigomeanProgDirection)
if interp:
dist= numpy.fabs(dapar-self._interpolatedThetasTrack)
else:
dist= numpy.fabs(dapar-self._thetasTrack)
return numpy.argmin(dist)
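    # The 2 pi wraps are needed because angles are only defined modulo 2 pi:
    # the candidate offsets da are all combinations
    #   (ar,ap,az)-angle_prog+2*pi*(i,j,k),
    # the combination most parallel to the frequency direction (smallest
    # |da x ehat|) is selected, and its projection onto ehat gives dapar.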
#########DISTRIBUTION AS A FUNCTION OF ANGLE ALONG THE STREAM##################
def pOparapar(self,Opar,apar,tdisrupt=None):
"""
NAME:
pOparapar
PURPOSE:
return the probability of a given parallel (frequency,angle) offset pair
INPUT:
            Opar - parallel frequency offset (array) (can be Quantity)
            apar - parallel angle offset along the stream (scalar) (can be Quantity)
            tdisrupt= (self._tdisrupt) maximum disruption time
OUTPUT:
p(Opar,apar)
HISTORY:
2015-12-07 - Written - Bovy (UofT)
"""
Opar= conversion.parse_frequency(Opar,ro=self._ro,vo=self._vo)
apar= conversion.parse_angle(apar)
if tdisrupt is None: tdisrupt= self._tdisrupt
if isinstance(Opar,(int,float,numpy.float32,numpy.float64)):
Opar= numpy.array([Opar])
out= numpy.zeros(len(Opar))
# Compute ts
ts= apar/Opar
# Evaluate
out[(ts < tdisrupt)*(ts >= 0.)]=\
numpy.exp(-0.5*(Opar[(ts < tdisrupt)*(ts >= 0.)]-self._meandO)**2.\
/self._sortedSigOEig[2])/\
numpy.sqrt(self._sortedSigOEig[2])
return out
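    # Up to normalization, the expression above implements
    #   p(Opar,apar) ~ exp(-(Opar-meandO)^2/(2*sig2))/sqrt(sig2)
    # restricted to 0 <= apar/Opar < tdisrupt, with sig2 the largest
    # eigenvalue of the frequency covariance: a Gaussian frequency
    # distribution combined with a uniform stripping-time distribution.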
def density_par(self,dangle,coord='apar',tdisrupt=None,
**kwargs):
"""
NAME:
density_par
PURPOSE:
calculate the density as a function of a parallel coordinate
INPUT:
dangle - parallel angle offset for this coordinate value
            coord - coordinate to return the density in ('apar' [default],
                    'll','ra','customra','phi')
            tdisrupt= (self._tdisrupt) maximum disruption time
OUTPUT:
density(angle)
HISTORY:
2015-11-17 - Written - Bovy (UofT)
"""
if coord.lower() != 'apar':
            # Need to compute the Jacobian d(coord)/d(apar) at this angle
            ddangle= dangle+10.**-7.
            ddangle-= dangle # ensures the offset step is exactly representable
if coord.lower() == 'phi':
phi_h= coords.rect_to_cyl(\
self._interpTrackX(dangle+ddangle),
self._interpTrackY(dangle+ddangle),
self._interpTrackZ(dangle+ddangle))
phi= coords.rect_to_cyl(\
self._interpTrackX(dangle),
self._interpTrackY(dangle),
self._interpTrackZ(dangle))
jac= numpy.fabs(phi_h[1]-phi[1])/ddangle
elif coord.lower() == 'll' or coord.lower() == 'ra' \
or coord.lower() == 'customra':
XYZ_h= coords.galcenrect_to_XYZ(\
self._interpTrackX(dangle+ddangle)*self._ro,
self._interpTrackY(dangle+ddangle)*self._ro,
self._interpTrackZ(dangle+ddangle)*self._ro,
Xsun=self._R0,Zsun=self._Zsun)
lbd_h= coords.XYZ_to_lbd(XYZ_h[0],XYZ_h[1],XYZ_h[2],
degree=True)
XYZ= coords.galcenrect_to_XYZ(\
self._interpTrackX(dangle)*self._ro,
self._interpTrackY(dangle)*self._ro,
self._interpTrackZ(dangle)*self._ro,
Xsun=self._R0,Zsun=self._Zsun)
lbd= coords.XYZ_to_lbd(XYZ[0],XYZ[1],XYZ[2],
degree=True)
if coord.lower() == 'll':
jac= numpy.fabs(lbd_h[0]-lbd[0])/ddangle
else:
radec_h= coords.lb_to_radec(lbd_h[0],
lbd_h[1],
degree=True)
radec= coords.lb_to_radec(lbd[0],
lbd[1],
degree=True)
if coord.lower() == 'ra':
jac= numpy.fabs(radec_h[0]-radec[0])/ddangle
else:
xieta_h= coords.radec_to_custom(\
radec_h[0],radec_h[1],T=self._custom_transform,
degree=True)
xieta= coords.radec_to_custom(\
radec[0],radec[1],T=self._custom_transform,
degree=True)
jac= numpy.fabs(xieta_h[0]-xieta[0])/ddangle
else:
raise ValueError('Coordinate input %s not supported by density_par' % coord)
else:
jac= 1.
return self._density_par(dangle,tdisrupt=tdisrupt,**kwargs)/jac
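    # Hedged usage sketch (angle value illustrative): densities in different
    # parallel coordinates differ only by the Jacobian computed above
    #
    # >>> dens_apar= sdf.density_par(0.3)            # per unit parallel angle
    # >>> dens_ll= sdf.density_par(0.3,coord='ll')   # per deg of longitude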
def _density_par(self,dangle,tdisrupt=None):
"""The raw density as a function of parallel angle"""
if tdisrupt is None: tdisrupt= self._tdisrupt
dOmin= dangle/tdisrupt
# Normalize to 1 close to progenitor
return 0.5\
*(1.+special.erf((self._meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2])))
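    # This closed form follows from integrating pOparapar over Opar: at angle
    # apar only frequencies Opar > dOmin= apar/tdisrupt have had time to
    # reach apar, so
    #   int_{dOmin}^{inf} N(Opar;meandO,sig2) dOpar
    #     = 0.5*(1+erf((meandO-dOmin)/sqrt(2*sig2))),
    # which tends to 1 near the progenitor (dOmin -> 0 with meandO >> sig).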
def length(self,threshold=0.2,phys=False,ang=False,tdisrupt=None,
**kwargs):
"""
NAME:
length
PURPOSE:
calculate the length of the stream
INPUT:
threshold - threshold down from the density near the progenitor at which to define the 'end' of the stream
phys= (False) if True, return the length in physical kpc
ang= (False) if True, return the length in sky angular arc length in degree
            coord - coordinate to return the density in ('apar' [default],
                    'll','ra','customra','phi')
            tdisrupt= (self._tdisrupt) maximum disruption time
OUTPUT:
length (rad for parallel angle; kpc for physical length; deg for sky arc length)
HISTORY:
2015-12-22 - Written - Bovy (UofT)
"""
peak_dens= self.density_par(0.1,tdisrupt=tdisrupt,**kwargs) # assume that this is the peak
try:
result=\
optimize.brentq(lambda x: self.density_par(x,
tdisrupt=tdisrupt,
**kwargs)\
-peak_dens*threshold,
0.1,self._deltaAngleTrack)
except RuntimeError: #pragma: no cover
raise RuntimeError('Length could not be returned, because length method failed to find the threshold value')
except ValueError:
raise ValueError('Length could not be returned, because length method failed to initialize')
if phys:
# Need to now integrate length
dXda= self._interpTrackX.derivative()
dYda= self._interpTrackY.derivative()
dZda= self._interpTrackZ.derivative()
result= integrate.quad(lambda da: numpy.sqrt(dXda(da)**2.\
+dYda(da)**2.\
+dZda(da)**2.),
0.,result)[0]*self._ro
elif ang:
# Need to now integrate length
if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,0],-1)
-self._interpolatedObsTrackLB[:,0]) > 0.:
ll= dePeriod(self._interpolatedObsTrackLB[:,0][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi
else:
ll= dePeriod(self._interpolatedObsTrackLB[::-1,0][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi
if numpy.median(numpy.roll(self._interpolatedObsTrackLB[:,1],-1)
-self._interpolatedObsTrackLB[:,1]) > 0.:
bb= dePeriod(self._interpolatedObsTrackLB[:,1][:,numpy.newaxis].T*numpy.pi/180.).T*180./numpy.pi
else:
bb= dePeriod(self._interpolatedObsTrackLB[::-1,1][:,numpy.newaxis].T*numpy.pi/180.).T[::-1]*180./numpy.pi
dlda= interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,ll,k=3).derivative()
dbda= interpolate.InterpolatedUnivariateSpline(\
self._interpolatedThetasTrack,bb,k=3).derivative()
result= integrate.quad(lambda da: numpy.sqrt(dlda(da)**2.\
+dbda(da)**2.),
0.,result)[0]
return result
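    # Hedged usage sketch (threshold illustrative):
    #
    # >>> L_apar= sdf.length(threshold=0.2)           # parallel angle [rad]
    # >>> L_kpc= sdf.length(threshold=0.2,phys=True)  # physical length [kpc]
    # >>> L_deg= sdf.length(threshold=0.2,ang=True)   # sky arc length [deg]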
@physical_conversion('frequency',pop=True)
def meanOmega(self,dangle,oned=False,offset_sign=None,
tdisrupt=None):
"""
NAME:
meanOmega
PURPOSE:
calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption)
            offset_sign= sign of the frequency offset (shouldn't be set)
            tdisrupt= (self._tdisrupt) maximum disruption time
OUTPUT:
mean Omega
HISTORY:
2013-12-01 - Written - Bovy (IAS)
"""
if offset_sign is None: offset_sign= self._sigMeanSign
if tdisrupt is None: tdisrupt= self._tdisrupt
dOmin= dangle/tdisrupt
meandO= self._meandO
dO1D= ((numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
*numpy.exp(-0.5*(meandO-dOmin)**2.\
/self._sortedSigOEig[2])/
(1.+special.erf((meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2]))))\
+meandO)
if oned: return dO1D
else:
return self._progenitor_Omega+dO1D*self._dsigomeanProgDirection\
*offset_sign
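    # This is the mean of a Gaussian in dO truncated below at
    # dOmin= dangle/tdisrupt:
    #   <dO|dangle>= meandO
    #     +sqrt(2/pi)*sig*exp(-(meandO-dOmin)^2/(2*sig2))
    #      /(1+erf((meandO-dOmin)/sqrt(2*sig2))),
    # with sig2= self._sortedSigOEig[2] the largest frequency eigenvalue.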
@physical_conversion('frequency',pop=True)
def sigOmega(self,dangle):
"""
NAME:
            sigOmega
PURPOSE:
calculate the 1D sigma in frequency as a function of angle, assuming a uniform time distribution up to a maximum time
INPUT:
dangle - angle offset
OUTPUT:
sigma Omega
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
dOmin= dangle/self._tdisrupt
meandO= self._meandO
sO1D2= ((numpy.sqrt(2./numpy.pi)*numpy.sqrt(self._sortedSigOEig[2])\
*(meandO+dOmin)\
*numpy.exp(-0.5*(meandO-dOmin)**2.\
/self._sortedSigOEig[2])/
(1.+special.erf((meandO-dOmin)\
/numpy.sqrt(2.*self._sortedSigOEig[2]))))\
+meandO**2.+self._sortedSigOEig[2])
mO= self.meanOmega(dangle,oned=True,use_physical=False)
return numpy.sqrt(sO1D2-mO**2.)
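    # sO1D2 above is the second moment of the same truncated Gaussian, so the
    # returned dispersion is simply sqrt(<O^2|dangle>-<O|dangle>^2).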
def ptdAngle(self,t,dangle):
"""
NAME:
            ptdAngle
PURPOSE:
return the probability of a given stripping time at a given angle along the stream
INPUT:
t - stripping time
dangle - angle offset along the stream
OUTPUT:
p(td|dangle)
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
        if isinstance(t,(int,float,numpy.float32,numpy.float64)):
            t= numpy.array([t])
        out= numpy.zeros(len(t))
        # Only stripping times 0 < t < tdisrupt contribute; use an
        # element-wise mask so that array-valued t is handled correctly
        tindx= (t > 0.)*(t < self._tdisrupt)
        if not numpy.any(tindx): return out
        dO= dangle/t[tindx]
        #p(t|a) = \int dO p(O,t|a) = \int dO p(t|O,a) p(O|a)
        #       = \int dO delta(t-a/O) p(O|a) = O^2/a p(O|a);
        #p(O|a) = \int dt p(a|O,t) p(O) p(t) = 1/O p(O)
        out[tindx]=\
            dO**2./dangle*numpy.exp(-0.5*(dO-self._meandO)**2.\
                                        /self._sortedSigOEig[2])/\
            numpy.sqrt(self._sortedSigOEig[2])
return out
@physical_conversion('time',pop=True)
def meantdAngle(self,dangle):
"""
NAME:
meantdAngle
PURPOSE:
calculate the mean stripping time at a given angle
INPUT:
dangle - angle offset along the stream
OUTPUT:
mean stripping time at this dangle
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
Tlow= dangle/(self._meandO+3.*numpy.sqrt(self._sortedSigOEig[2]))
Thigh= dangle/(self._meandO-3.*numpy.sqrt(self._sortedSigOEig[2]))
num= integrate.quad(lambda x: x*self.ptdAngle(x,dangle),
Tlow,Thigh)[0]
denom= integrate.quad(self.ptdAngle,Tlow,Thigh,(dangle,))[0]
if denom == 0.: return self._tdisrupt
elif numpy.isnan(denom): return 0.
else: return num/denom
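    # Hedged usage sketch (angle value illustrative): the moments of the
    # stripping-time distribution follow from p(td|dangle) by quadrature
    #
    # >>> td_mean= sdf.meantdAngle(0.3)  # mean stripping time at apar=0.3
    # >>> td_sig= sdf.sigtdAngle(0.3)    # dispersion of stripping times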
@physical_conversion('time',pop=True)
def sigtdAngle(self,dangle):
"""
NAME:
sigtdAngle
PURPOSE:
calculate the dispersion in the stripping times at a given angle
INPUT:
dangle - angle offset along the stream
OUTPUT:
dispersion in the stripping times at this angle
HISTORY:
2013-12-05 - Written - Bovy (IAS)
"""
Tlow= dangle/(self._meandO+3.*numpy.sqrt(self._sortedSigOEig[2]))
        Thigh= dangle/(self._meandO-3.*numpy.sqrt(self._sortedSigOEig[2]))
        # Dispersion sqrt(<t^2>-<t>^2) of p(td|dangle), by direct quadrature
        numsig2= integrate.quad(lambda x: x**2.*self.ptdAngle(x,dangle),
                                Tlow,Thigh)[0]
        nummean= integrate.quad(lambda x: x*self.ptdAngle(x,dangle),
                                Tlow,Thigh)[0]
        denom= integrate.quad(self.ptdAngle,Tlow,Thigh,(dangle,))[0]
        if denom == 0. or numpy.isnan(denom): return 0.
        return numpy.sqrt(numsig2/denom-(nummean/denom)**2.)
from __future__ import print_function, division, absolute_import
import time
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import cv2
import shapely
import shapely.geometry
import imgaug as ia
from imgaug.testutils import reseed
def main():
time_start = time.time()
test_is_np_array()
test_is_single_integer()
test_is_single_float()
test_is_single_number()
test_is_iterable()
test_is_string()
test_is_single_bool()
test_is_integer_array()
test_is_float_array()
test_is_callable()
test_caller_name()
test_seed()
test_current_random_state()
test_new_random_state()
test_dummy_random_state()
test_copy_random_state()
test_derive_random_state()
test_derive_random_states()
test_forward_random_state()
# test_quokka()
# test_quokka_square()
# test_angle_between_vectors()
# test_draw_text()
test_imresize_many_images()
test_imresize_single_image()
test_pad()
test_compute_paddings_for_aspect_ratio()
test_pad_to_aspect_ratio()
test_pool()
test_avg_pool()
test_max_pool()
test_draw_grid()
# test_show_grid()
# test_do_assert()
# test_HooksImages_is_activated()
# test_HooksImages_is_propagating()
# test_HooksImages_preprocess()
# test_HooksImages_postprocess()
test_Keypoint()
test_KeypointsOnImage()
test_BoundingBox()
test_BoundingBoxesOnImage()
# test_HeatmapsOnImage_get_arr()
# test_HeatmapsOnImage_find_global_maxima()
test_HeatmapsOnImage_draw()
test_HeatmapsOnImage_draw_on_image()
test_HeatmapsOnImage_invert()
test_HeatmapsOnImage_pad()
# test_HeatmapsOnImage_pad_to_aspect_ratio()
test_HeatmapsOnImage_avg_pool()
test_HeatmapsOnImage_max_pool()
test_HeatmapsOnImage_scale()
# test_HeatmapsOnImage_to_uint8()
# test_HeatmapsOnImage_from_uint8()
# test_HeatmapsOnImage_from_0to1()
# test_HeatmapsOnImage_change_normalization()
# test_HeatmapsOnImage_copy()
# test_HeatmapsOnImage_deepcopy()
test_SegmentationMapOnImage_bool()
test_SegmentationMapOnImage_get_arr_int()
# test_SegmentationMapOnImage_get_arr_bool()
test_SegmentationMapOnImage_draw()
test_SegmentationMapOnImage_draw_on_image()
test_SegmentationMapOnImage_pad()
test_SegmentationMapOnImage_pad_to_aspect_ratio()
test_SegmentationMapOnImage_scale()
test_SegmentationMapOnImage_to_heatmaps()
test_SegmentationMapOnImage_from_heatmaps()
test_SegmentationMapOnImage_copy()
test_SegmentationMapOnImage_deepcopy()
test_Polygon___init__()
test_Polygon_xx()
test_Polygon_yy()
test_Polygon_xx_int()
test_Polygon_yy_int()
test_Polygon_is_valid()
test_Polygon_area()
test_Polygon_project()
test_Polygon__compute_inside_image_point_mask()
test_Polygon_is_fully_within_image()
test_Polygon_is_partly_within_image()
test_Polygon_is_out_of_image()
test_Polygon_cut_out_of_image()
test_Polygon_clip_out_of_image()
test_Polygon_shift()
test_Polygon_draw_on_image()
test_Polygon_extract_from_image()
test_Polygon_to_shapely_polygon()
test_Polygon_to_bounding_box()
test_Polygon_from_shapely()
test_Polygon_copy()
test_Polygon_deepcopy()
test_Polygon___repr__()
test_Polygon___str__()
# test_Batch()
test_BatchLoader()
# test_BackgroundAugmenter.get_batch()
# test_BackgroundAugmenter._augment_images_worker()
# test_BackgroundAugmenter.terminate()
time_end = time.time()
print("<%s> Finished without errors in %.4fs." % (__file__, time_end - time_start,))
def test_is_np_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((64, 64, 3), dtype=np.uint8),
np.zeros((1, 2), dtype=np.float32),
np.zeros((100,), dtype=np.float64)
]
values_false = [
"A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(),
-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4
]
for value in values_true:
assert ia.is_np_array(value) is True
for value in values_false:
assert ia.is_np_array(value) is False
def test_is_single_integer():
assert ia.is_single_integer("A") is False
assert ia.is_single_integer(None) is False
assert ia.is_single_integer(1.2) is False
assert ia.is_single_integer(1.0) is False
assert ia.is_single_integer(np.ones((1,), dtype=np.float32)[0]) is False
assert ia.is_single_integer(1) is True
assert ia.is_single_integer(1234) is True
assert ia.is_single_integer(np.ones((1,), dtype=np.uint8)[0]) is True
assert ia.is_single_integer(np.ones((1,), dtype=np.int32)[0]) is True
def test_is_single_float():
assert ia.is_single_float("A") is False
assert ia.is_single_float(None) is False
assert ia.is_single_float(1.2) is True
assert ia.is_single_float(1.0) is True
assert ia.is_single_float(np.ones((1,), dtype=np.float32)[0]) is True
assert ia.is_single_float(1) is False
assert ia.is_single_float(1234) is False
assert ia.is_single_float(np.ones((1,), dtype=np.uint8)[0]) is False
assert ia.is_single_float(np.ones((1,), dtype=np.int32)[0]) is False
def test_caller_name():
assert ia.caller_name() == 'test_caller_name'
def test_is_single_number():
class _Dummy(object):
pass
values_true = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4]
values_false = ["A", "BC", "1", True, False, (1.0, 2.0), [1.0, 2.0], _Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_single_number(value) is True
for value in values_false:
assert ia.is_single_number(value) is False
def test_is_iterable():
class _Dummy(object):
pass
values_true = [
[0, 1, 2],
["A", "X"],
[[123], [456, 789]],
[],
(1, 2, 3),
(1,),
tuple(),
"A",
"ABC",
"",
np.zeros((100,), dtype=np.uint8)
]
values_false = [1, 100, 0, -100, -1, 1.2, -1.2, True, False, _Dummy()]
for value in values_true:
assert ia.is_iterable(value) is True, value
for value in values_false:
assert ia.is_iterable(value) is False
def test_is_string():
class _Dummy(object):
pass
values_true = ["A", "BC", "1", ""]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False, (1.0, 2.0), [1.0, 2.0],
_Dummy(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
assert ia.is_string(value) is True
for value in values_false:
assert ia.is_string(value) is False
def test_is_single_bool():
class _Dummy(object):
pass
values_true = [False, True]
values_false = [-100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, (1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8), np.zeros((1,), dtype=bool)]
for value in values_true:
assert ia.is_single_bool(value) is True
for value in values_false:
assert ia.is_single_bool(value) is False
def test_is_integer_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64),
        np.zeros((1, 2), dtype=bool)
]
for value in values_true:
assert ia.is_integer_array(value) is True
for value in values_false:
assert ia.is_integer_array(value) is False
def test_is_float_array():
class _Dummy(object):
pass
values_true = [
np.zeros((1, 2), dtype=np.float16),
np.zeros((100,), dtype=np.float32),
np.zeros((1, 2), dtype=np.float64)
]
values_false = [
"A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy(),
np.zeros((1, 2), dtype=np.uint8),
np.zeros((100,), dtype=np.uint8),
np.zeros((1, 2), dtype=np.uint16),
np.zeros((1, 2), dtype=np.int32),
np.zeros((1, 2), dtype=np.int64),
        np.zeros((1, 2), dtype=bool)
]
for value in values_true:
assert ia.is_float_array(value) is True
for value in values_false:
assert ia.is_float_array(value) is False
def test_is_callable():
def _dummy_func():
pass
_dummy_func2 = lambda x: x
class _Dummy1(object):
pass
class _Dummy2(object):
def __call__(self):
pass
values_true = [_dummy_func, _dummy_func2, _Dummy2()]
values_false = ["A", "BC", "1", "", -100, 1, 0, 1, 100, -1.2, -0.001, 0.0, 0.001, 1.2, 1e-4, True, False,
(1.0, 2.0), [1.0, 2.0], _Dummy1(), np.zeros((1, 2), dtype=np.uint8)]
for value in values_true:
        assert ia.is_callable(value) is True
    for value in values_false:
        assert ia.is_callable(value) is False
def test_seed():
ia.seed(10017)
rs = np.random.RandomState(10017)
assert ia.CURRENT_RANDOM_STATE.randint(0, 1000*1000) == rs.randint(0, 1000*1000)
reseed()
def test_current_random_state():
assert ia.current_random_state() == ia.CURRENT_RANDOM_STATE
def test_new_random_state():
seed = 1000
ia.seed(seed)
rs_observed = ia.new_random_state(seed=None, fully_random=False)
rs_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=None, fully_random=False)
rs_observed2 = ia.new_random_state(seed=None, fully_random=False)
assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6)
ia.seed(seed)
np.random.seed(seed)
rs_observed = ia.new_random_state(seed=None, fully_random=True)
rs_not_expected = np.random.RandomState(np.random.RandomState(seed).randint(0, 10**6, 1)[0])
assert rs_observed.randint(0, 10**6) != rs_not_expected.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=None, fully_random=True)
rs_observed2 = ia.new_random_state(seed=None, fully_random=True)
assert rs_observed1.randint(0, 10**6) != rs_observed2.randint(0, 10**6)
rs_observed1 = ia.new_random_state(seed=1234)
rs_observed2 = ia.new_random_state(seed=1234)
rs_expected = np.random.RandomState(1234)
assert rs_observed1.randint(0, 10**6) == rs_observed2.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_dummy_random_state():
assert ia.dummy_random_state().randint(0, 10**6) == np.random.RandomState(1).randint(0, 10**6)
def test_copy_random_state():
rs = np.random.RandomState(1017)
rs_copy = ia.copy_random_state(rs)
assert rs != rs_copy
assert rs.randint(0, 10**6) == rs_copy.randint(0, 10**6)
assert ia.copy_random_state(np.random) == np.random
assert ia.copy_random_state(np.random, force_copy=True) != np.random
def test_derive_random_state():
    rs = np.random.RandomState(1017)
    rs_observed = ia.derive_random_state(rs)
rs_expected = np.random.RandomState(np.random.RandomState(1017).randint(0, 10**6))
assert rs_observed.randint(0, 10**6) == rs_expected.randint(0, 10**6)
def test_derive_random_states():
rs_observed1, rs_observed2 = ia.derive_random_states(np.random.RandomState(1017), n=2)
seed = np.random.RandomState(1017).randint(0, 10**6)
rs_expected1 = np.random.RandomState(seed+0)
rs_expected2 = np.random.RandomState(seed+1)
assert rs_observed1.randint(0, 10**6) == rs_expected1.randint(0, 10**6)
assert rs_observed2.randint(0, 10**6) == rs_expected2.randint(0, 10**6)
def test_forward_random_state():
rs1 = np.random.RandomState(1017)
rs2 = np.random.RandomState(1017)
ia.forward_random_state(rs1)
rs2.uniform()
assert rs1.randint(0, 10**6) == rs2.randint(0, 10**6)
def test_imresize_many_images():
interpolations = [None,
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
for c in [1, 3]:
image1 = np.zeros((16, 16, c), dtype=np.uint8) + 255
image2 = np.zeros((16, 16, c), dtype=np.uint8)
image3 = np.pad(
np.zeros((8, 8, c), dtype=np.uint8) + 255,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=0
)
image1_small = np.zeros((8, 8, c), dtype=np.uint8) + 255
image2_small = np.zeros((8, 8, c), dtype=np.uint8)
image3_small = np.pad(
np.zeros((4, 4, c), dtype=np.uint8) + 255,
((2, 2), (2, 2), (0, 0)),
mode="constant",
constant_values=0
)
image1_large = np.zeros((32, 32, c), dtype=np.uint8) + 255
image2_large = np.zeros((32, 32, c), dtype=np.uint8)
image3_large = np.pad(
np.zeros((16, 16, c), dtype=np.uint8) + 255,
((8, 8), (8, 8), (0, 0)),
mode="constant",
constant_values=0
)
images = np.uint8([image1, image2, image3])
images_small = np.uint8([image1_small, image2_small, image3_small])
images_large = np.uint8([image1_large, image2_large, image3_large])
for images_this_iter in [images, list(images)]: # test for ndarray and list(ndarray) input
for interpolation in interpolations:
images_same_observed = ia.imresize_many_images(images_this_iter, (16, 16), interpolation=interpolation)
for image_expected, image_observed in zip(images_this_iter, images_same_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
assert np.sum(diff) == 0
for interpolation in interpolations:
images_small_observed = ia.imresize_many_images(images_this_iter, (8, 8), interpolation=interpolation)
for image_expected, image_observed in zip(images_small, images_small_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
for interpolation in interpolations:
images_large_observed = ia.imresize_many_images(images_this_iter, (32, 32), interpolation=interpolation)
for image_expected, image_observed in zip(images_large, images_large_observed):
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
# test size given as single int
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 8)
assert observed.shape == (1, 8, 8, 3)
# test size given as single float
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 2.0)
assert observed.shape == (1, 8, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, 0.5)
assert observed.shape == (1, 2, 2, 3)
# test size given as (float, float)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 2.0))
assert observed.shape == (1, 8, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (0.5, 0.5))
assert observed.shape == (1, 2, 2, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 0.5))
assert observed.shape == (1, 8, 2, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (0.5, 2.0))
assert observed.shape == (1, 2, 8, 3)
# test size given as int+float or float+int
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (11, 2.0))
assert observed.shape == (1, 11, 8, 3)
images = np.zeros((1, 4, 4, 3), dtype=np.uint8)
observed = ia.imresize_many_images(images, (2.0, 11))
assert observed.shape == (1, 8, 11, 3)
# test no channels
images = np.zeros((1, 4, 4), dtype=np.uint8)
images_rs = ia.imresize_many_images(images, (2, 2))
assert images_rs.shape == (1, 2, 2)
images = [np.zeros((4, 4), dtype=np.uint8)]
images_rs = ia.imresize_many_images(images, (2, 2))
assert isinstance(images_rs, list)
assert images_rs[0].shape == (2, 2)
# test len 0 input
observed = ia.imresize_many_images(np.zeros((0, 8, 8, 3), dtype=np.uint8), (4, 4))
assert ia.is_np_array(observed)
assert observed.dtype.type == np.uint8
assert len(observed) == 0
observed = ia.imresize_many_images([], (4, 4))
assert isinstance(observed, list)
assert len(observed) == 0
# test images with zero height/width
images = [np.zeros((0, 4, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
images = [np.zeros((4, 0, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
images = [np.zeros((0, 0, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=(2, 2))
except Exception as exc:
assert "Cannot resize images, because at least one image has a height and/or width of zero." in str(exc)
got_exception = True
assert got_exception
# test invalid sizes
sizes_all = [(-1, 2), (0, 2)]
sizes_all = sizes_all\
+ [(float(a), b) for a, b in sizes_all]\
+ [(a, float(b)) for a, b in sizes_all]\
+ [(float(a), float(b)) for a, b in sizes_all]\
+ [(-a, -b) for a, b in sizes_all]\
+ [(-float(a), -b) for a, b in sizes_all]\
+ [(-a, -float(b)) for a, b in sizes_all]\
+ [(-float(a), -float(b)) for a, b in sizes_all]
sizes_all = sizes_all\
+ [(b, a) for a, b in sizes_all]
sizes_all = sizes_all\
+ [-1.0, 0.0, -1, 0]
for sizes in sizes_all:
images = [np.zeros((4, 4, 3), dtype=np.uint8)]
got_exception = False
try:
_ = ia.imresize_many_images(images, sizes=sizes)
except Exception as exc:
assert "value is zero or lower than zero." in str(exc)
got_exception = True
assert got_exception
# test list input but all with same shape
images = [np.zeros((8, 8, 3), dtype=np.uint8) for _ in range(2)]
observed = ia.imresize_many_images(images, (4, 4))
assert isinstance(observed, list)
assert all([image.shape == (4, 4, 3) for image in observed])
assert all([image.dtype.type == np.uint8 for image in observed])
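# Hedged summary of the size semantics exercised above: a single int sets both
# output dimensions, a single float scales both, and a (height, width) tuple
# may mix ints (absolute pixel sizes) and floats (per-axis scale factors).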
def test_imresize_single_image():
for c in [-1, 1, 3]:
image1 = np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255
image2 = np.zeros((16, 16, abs(c)), dtype=np.uint8)
image3 = np.pad(
np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255,
((4, 4), (4, 4), (0, 0)),
mode="constant",
constant_values=0
)
image1_small = np.zeros((8, 8, abs(c)), dtype=np.uint8) + 255
image2_small = np.zeros((8, 8, abs(c)), dtype=np.uint8)
image3_small = np.pad(
np.zeros((4, 4, abs(c)), dtype=np.uint8) + 255,
((2, 2), (2, 2), (0, 0)),
mode="constant",
constant_values=0
)
image1_large = np.zeros((32, 32, abs(c)), dtype=np.uint8) + 255
image2_large = np.zeros((32, 32, abs(c)), dtype=np.uint8)
image3_large = np.pad(
np.zeros((16, 16, abs(c)), dtype=np.uint8) + 255,
((8, 8), (8, 8), (0, 0)),
mode="constant",
constant_values=0
)
images = np.uint8([image1, image2, image3])
images_small = np.uint8([image1_small, image2_small, image3_small])
images_large = np.uint8([image1_large, image2_large, image3_large])
if c == -1:
images = images[:, :, 0]
images_small = images_small[:, :, 0]
images_large = images_large[:, :, 0]
interpolations = [None,
"nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
for interpolation in interpolations:
for image in images:
image_observed = ia.imresize_single_image(image, (16, 16), interpolation=interpolation)
diff = np.abs(image.astype(np.int32) - image_observed.astype(np.int32))
assert np.sum(diff) == 0
for interpolation in interpolations:
for image, image_expected in zip(images, images_small):
image_observed = ia.imresize_single_image(image, (8, 8), interpolation=interpolation)
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
for interpolation in interpolations:
for image, image_expected in zip(images, images_large):
image_observed = ia.imresize_single_image(image, (32, 32), interpolation=interpolation)
diff = np.abs(image_expected.astype(np.int32) - image_observed.astype(np.int32))
diff_fraction = np.sum(diff) / (image_observed.size * 255)
assert diff_fraction < 0.5
def test_pad():
# -------
# uint8, int32
# -------
for dtype in [np.uint8, np.int32]:
arr = np.zeros((3, 3), dtype=dtype) + 255
arr_pad = ia.pad(arr)
assert arr_pad.shape == (3, 3)
assert arr_pad.dtype.type == dtype
assert np.array_equal(arr_pad, arr)
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 0)
arr_pad = ia.pad(arr, right=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[:, -1] == 0)
arr_pad = ia.pad(arr, bottom=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[-1, :] == 0)
arr_pad = ia.pad(arr, left=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[:, 0] == 0)
arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
assert arr_pad.shape == (3+(1+3), 3+(2+4))
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 0)
assert np.all(arr_pad[:, -2:] == 0)
assert np.all(arr_pad[-3:, :] == 0)
assert np.all(arr_pad[:, :4] == 0)
arr_pad = ia.pad(arr, top=1, cval=10)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :] == 10)
arr = np.zeros((3, 3, 3), dtype=dtype) + 128
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3, 3)
assert arr_pad.dtype.type == dtype
assert np.all(arr_pad[0, :, 0] == 0)
assert np.all(arr_pad[0, :, 1] == 0)
assert np.all(arr_pad[0, :, 2] == 0)
arr = np.zeros((3, 3), dtype=dtype) + 128
arr[1, 1] = 200
arr_pad = ia.pad(arr, top=1, mode="maximum")
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 128
assert arr_pad[0, 1] == 200
assert arr_pad[0, 2] == 128
arr = np.zeros((3, 3), dtype=dtype)
arr_pad = ia.pad(arr, top=1, mode="constant", cval=123)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 123
assert arr_pad[0, 1] == 123
assert arr_pad[0, 2] == 123
assert arr_pad[1, 0] == 0
arr = np.zeros((1, 1), dtype=dtype) + 100
arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=200)
assert arr_pad.shape == (5, 1)
assert arr_pad.dtype.type == dtype
assert arr_pad[0, 0] == 200
assert arr_pad[1, 0] == 175
assert arr_pad[2, 0] == 150
assert arr_pad[3, 0] == 125
assert arr_pad[4, 0] == 100
# -------
# float32, float64
# -------
for dtype in [np.float32, np.float64]:
arr = np.zeros((3, 3), dtype=dtype) + 1.0
arr_pad = ia.pad(arr)
assert arr_pad.shape == (3, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad, arr)
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, right=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[:, -1], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, bottom=1)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[-1, :], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, left=1)
assert arr_pad.shape == (3, 4)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[:, 0], dtype([0, 0, 0]))
arr_pad = ia.pad(arr, top=1, right=2, bottom=3, left=4)
assert arr_pad.shape == (3+(1+3), 3+(2+4))
assert arr_pad.dtype.type == dtype
assert 0 - 1e-6 < np.max(arr_pad[0, :]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[:, -2:]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[-3, :]) < 0 + 1e-6
assert 0 - 1e-6 < np.max(arr_pad[:, :4]) < 0 + 1e-6
arr_pad = ia.pad(arr, top=1, cval=0.2)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :], dtype([0.2, 0.2, 0.2]))
arr = np.zeros((3, 3, 3), dtype=dtype) + 0.5
arr_pad = ia.pad(arr, top=1)
assert arr_pad.shape == (4, 3, 3)
assert arr_pad.dtype.type == dtype
assert np.allclose(arr_pad[0, :, 0], dtype([0, 0, 0]))
assert np.allclose(arr_pad[0, :, 1], dtype([0, 0, 0]))
assert np.allclose(arr_pad[0, :, 2], dtype([0, 0, 0]))
arr = np.zeros((3, 3), dtype=dtype) + 0.5
arr[1, 1] = 0.75
arr_pad = ia.pad(arr, top=1, mode="maximum")
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert 0.50 - 1e-6 < arr_pad[0, 0] < 0.50 + 1e-6
assert 0.75 - 1e-6 < arr_pad[0, 1] < 0.75 + 1e-6
assert 0.50 - 1e-6 < arr_pad[0, 2] < 0.50 + 1e-6
arr = np.zeros((3, 3), dtype=dtype)
arr_pad = ia.pad(arr, top=1, mode="constant", cval=0.4)
assert arr_pad.shape == (4, 3)
assert arr_pad.dtype.type == dtype
assert 0.4 - 1e-6 < arr_pad[0, 0] < 0.4 + 1e-6
assert 0.4 - 1e-6 < arr_pad[0, 1] < 0.4 + 1e-6
assert 0.4 - 1e-6 < arr_pad[0, 2] < 0.4 + 1e-6
assert 0.0 - 1e-6 < arr_pad[1, 0] < 0.0 + 1e-6
arr = np.zeros((1, 1), dtype=dtype) + 0.6
arr_pad = ia.pad(arr, top=4, mode="linear_ramp", cval=1.0)
assert arr_pad.shape == (5, 1)
assert arr_pad.dtype.type == dtype
assert 1.0 - 1e-6 < arr_pad[0, 0] < 1.0 + 1e-6
assert 0.9 - 1e-6 < arr_pad[1, 0] < 0.9 + 1e-6
assert 0.8 - 1e-6 < arr_pad[2, 0] < 0.8 + 1e-6
assert 0.7 - 1e-6 < arr_pad[3, 0] < 0.7 + 1e-6
assert 0.6 - 1e-6 < arr_pad[4, 0] < 0.6 + 1e-6
def test_compute_paddings_for_aspect_ratio():
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 0
assert bottom == 0
assert left == 0
arr = np.zeros((1, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 2
assert right == 0
assert bottom == 1
assert left == 0
arr = np.zeros((4, 1), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 2
assert bottom == 0
assert left == 1
arr = np.zeros((2, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 1
assert right == 0
assert bottom == 1
assert left == 0
arr = np.zeros((4, 2), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 1.0)
assert top == 0
assert right == 1
assert bottom == 0
assert left == 1
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 0.5)
assert top == 2
assert right == 0
assert bottom == 2
assert left == 0
arr = np.zeros((4, 4), dtype=np.uint8)
top, right, bottom, left = ia.compute_paddings_for_aspect_ratio(arr, 2.0)
assert top == 0
assert right == 2
assert bottom == 0
assert left == 2
def test_pad_to_aspect_ratio():
for dtype in [np.uint8, np.int32, np.float32]:
# aspect_ratio = 1.0
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((1, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((4, 1), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((2, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
arr = np.zeros((4, 2), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
# aspect_ratio != 1.0
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
arr = np.zeros((4, 4), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 0.5)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 8
assert arr_pad.shape[1] == 4
# 3d arr
arr = np.zeros((4, 2, 3), dtype=dtype)
arr_pad = ia.pad_to_aspect_ratio(arr, 1.0)
assert arr_pad.dtype.type == dtype
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 4
assert arr_pad.shape[2] == 3
# cval
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[:, 0:2]) == 0
assert np.max(arr_pad[:, -2:]) == 0
assert np.max(arr_pad[:, 2:-2]) == 128
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=10)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[:, 0:2]) == 10
assert np.max(arr_pad[:, -2:]) == 10
assert np.max(arr_pad[:, 2:-2]) == 128
arr = np.zeros((4, 4), dtype=np.float32) + 0.5
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.0)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert 0 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0 + 1e-6
assert 0 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0 + 1e-6
assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
arr = np.zeros((4, 4), dtype=np.float32) + 0.5
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, cval=0.1)
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert 0.1 - 1e-6 <= np.max(arr_pad[:, 0:2]) <= 0.1 + 1e-6
assert 0.1 - 1e-6 <= np.max(arr_pad[:, -2:]) <= 0.1 + 1e-6
assert 0.5 - 1e-6 <= np.max(arr_pad[:, 2:-2]) <= 0.5 + 1e-6
# mode
arr = np.zeros((4, 4), dtype=np.uint8) + 128
arr[1:3, 1:3] = 200
arr_pad = ia.pad_to_aspect_ratio(arr, 2.0, mode="maximum")
assert arr_pad.shape[0] == 4
assert arr_pad.shape[1] == 8
assert np.max(arr_pad[0:1, 0:2]) == 128
assert np.max(arr_pad[1:3, 0:2]) == 200
assert np.max(arr_pad[3:, 0:2]) == 128
assert np.max(arr_pad[0:1, -2:]) == 128
assert np.max(arr_pad[1:3, -2:]) == 200
assert np.max(arr_pad[3:, -2:]) == 128
# TODO add more tests for return_pad_amounts=True (a first hedged sketch follows below)
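# A minimal sketch for the TODO above. Assumptions: ia.pad_to_aspect_ratio
# accepts a return_pad_amounts flag analogous to
# SegmentationMapOnImage.pad_to_aspect_ratio (tested further below) and
# returns the amounts in (top, right, bottom, left) order. The expected
# amounts follow from test_compute_paddings_for_aspect_ratio above.
arr = np.zeros((4, 2), dtype=np.uint8)
arr_pad, pad_amounts = ia.pad_to_aspect_ratio(arr, 1.0, return_pad_amounts=True)
assert arr_pad.shape == (4, 4)
assert pad_amounts == (0, 1, 0, 1)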
def test_pool():
# basic functionality with uint8, int32, float32
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
arr = np.int32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
arr = np.float32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
# preserve_dtype off
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.average, preserve_dtype=False)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == np.float64
assert np.allclose(arr_pooled[0, 0], np.average([0, 1, 4, 5]))
assert np.allclose(arr_pooled[0, 1], np.average([2, 3, 6, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 9, 12, 13]))
assert np.allclose(arr_pooled[1, 1], np.average([10, 11, 14, 15]))
# maximum function
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, 2, np.max)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
# 3d array
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr = np.tile(arr[..., np.newaxis], (1, 1, 3))
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2, 3)
assert np.array_equal(arr_pooled[..., 0], arr_pooled[..., 1])
assert np.array_equal(arr_pooled[..., 1], arr_pooled[..., 2])
arr_pooled = arr_pooled[..., 0]
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
# block_size per axis
arr = np.float32([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.pool(arr, (2, 1), np.average)
assert arr_pooled.shape == (2, 4)
assert arr_pooled.dtype == arr.dtype.type
assert np.allclose(arr_pooled[0, 0], np.average([0, 4]))
assert np.allclose(arr_pooled[0, 1], np.average([1, 5]))
assert np.allclose(arr_pooled[0, 2], np.average([2, 6]))
assert np.allclose(arr_pooled[0, 3], np.average([3, 7]))
assert np.allclose(arr_pooled[1, 0], np.average([8, 12]))
assert np.allclose(arr_pooled[1, 1], np.average([9, 13]))
assert np.allclose(arr_pooled[1, 2], np.average([10, 14]))
assert np.allclose(arr_pooled[1, 3], np.average([11, 15]))
# cval
arr = np.uint8([
[0, 1, 2],
[4, 5, 6],
[8, 9, 10]
])
arr_pooled = ia.pool(arr, 2, np.average)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 0, 6, 0]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 0, 0]))
assert arr_pooled[1, 1] == int(np.average([10, 0, 0, 0]))
arr = np.uint8([
[0, 1],
[4, 5]
])
arr_pooled = ia.pool(arr, (4, 1), np.average)
assert arr_pooled.shape == (1, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 4, 0, 0]))
assert arr_pooled[0, 1] == int(np.average([1, 5, 0, 0]))
arr = np.uint8([
[0, 1, 2],
[4, 5, 6],
[8, 9, 10]
])
arr_pooled = ia.pool(arr, 2, np.average, cval=22)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 22, 6, 22]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 22, 22]))
assert arr_pooled[1, 1] == int(np.average([10, 22, 22, 22]))
def test_avg_pool():
# very basic test, as avg_pool() just calls pool(), which is tested in test_pool()
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.avg_pool(arr, 2)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.average([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.average([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.average([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.average([10, 11, 14, 15]))
def test_max_pool():
# very basic test, as max_pool() just calls pool(), which is tested in test_pool()
arr = np.uint8([
[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11],
[12, 13, 14, 15]
])
arr_pooled = ia.max_pool(arr, 2)
assert arr_pooled.shape == (2, 2)
assert arr_pooled.dtype == arr.dtype.type
assert arr_pooled[0, 0] == int(np.max([0, 1, 4, 5]))
assert arr_pooled[0, 1] == int(np.max([2, 3, 6, 7]))
assert arr_pooled[1, 0] == int(np.max([8, 9, 12, 13]))
assert arr_pooled[1, 1] == int(np.max([10, 11, 14, 15]))
def test_draw_grid():
image = np.zeros((2, 2, 3), dtype=np.uint8)
image[0, 0] = 64
image[0, 1] = 128
image[1, 0] = 192
image[1, 1] = 255  # 255 is the uint8 maximum; 256 would overflow
grid = ia.draw_grid([image], rows=1, cols=1)
assert np.array_equal(grid, image)
grid = ia.draw_grid(np.uint8([image]), rows=1, cols=1)
assert np.array_equal(grid, image)
grid = ia.draw_grid([image, image, image, image], rows=2, cols=2)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image], rows=1, cols=2)
expected = np.hstack([image, image])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=2, cols=None)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=None, cols=2)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
grid = ia.draw_grid([image, image, image, image], rows=None, cols=None)
expected = np.vstack([
np.hstack([image, image]),
np.hstack([image, image])
])
assert np.array_equal(grid, expected)
def test_Keypoint():
eps = 1e-8
# x/y/x_int/y_int
kp = ia.Keypoint(y=1, x=2)
assert kp.y == 1
assert kp.x == 2
assert kp.y_int == 1
assert kp.x_int == 2
kp = ia.Keypoint(y=1.1, x=2.7)
assert 1.1 - eps < kp.y < 1.1 + eps
assert 2.7 - eps < kp.x < 2.7 + eps
assert kp.y_int == 1
assert kp.x_int == 3
# project
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.project((10, 10), (10, 10))
assert kp2.y == 1
assert kp2.x == 2
kp2 = kp.project((10, 10), (20, 10))
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.project((10, 10), (10, 20))
assert kp2.y == 1
assert kp2.x == 4
kp2 = kp.project((10, 10), (20, 20))
assert kp2.y == 2
assert kp2.x == 4
# shift
kp = ia.Keypoint(y=1, x=2)
kp2 = kp.shift(y=1)
assert kp2.y == 2
assert kp2.x == 2
kp2 = kp.shift(y=-1)
assert kp2.y == 0
assert kp2.x == 2
kp2 = kp.shift(x=1)
assert kp2.y == 1
assert kp2.x == 3
kp2 = kp.shift(x=-1)
assert kp2.y == 1
assert kp2.x == 1
kp2 = kp.shift(y=1, x=2)
assert kp2.y == 2
assert kp2.x == 4
# __repr__ / __str__
kp = ia.Keypoint(y=1, x=2)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.00000000, y=1.00000000)"
kp = ia.Keypoint(y=1.2, x=2.7)
assert kp.__repr__() == kp.__str__() == "Keypoint(x=2.70000000, y=1.20000000)"
def test_KeypointsOnImage():
eps = 1e-8
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
# height/width
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(10, 20, 3))
assert kpi.height == 10
assert kpi.width == 20
# image instead of shape
kpi = ia.KeypointsOnImage(keypoints=kps, shape=np.zeros((10, 20, 3), dtype=np.uint8))
assert kpi.shape == (10, 20, 3)
# on()
kpi2 = kpi.on((10, 20, 3))
assert all([kp_i.x == kp_j.x and kp_i.y == kp_j.y for kp_i, kp_j in zip(kpi.keypoints, kpi2.keypoints)])
kpi2 = kpi.on((20, 40, 3))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
kpi2 = kpi.on(np.zeros((20, 40, 3), dtype=np.uint8))
assert kpi2.keypoints[0].x == 2
assert kpi2.keypoints[0].y == 4
assert kpi2.keypoints[1].x == 6
assert kpi2.keypoints[1].y == 8
# draw_on_image
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=3, copy=True, raise_if_out_of_image=False)
kps_mask_size3 = np.copy(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1] = 1
assert np.all(image_kps[kps_mask_size3] == [0, 255, 0])
assert np.all(image_kps[~kps_mask_size3] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=[0, 0, 255], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 0, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image_kps = kpi.draw_on_image(image, color=255, size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [255, 255, 255])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
image2 = np.copy(image)
image_kps = kpi.draw_on_image(image2, color=[0, 255, 0], size=1, copy=False, raise_if_out_of_image=False)
assert np.all(image2 == image_kps)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
assert np.all(image2[kps_mask] == [0, 255, 0])
assert np.all(image2[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=100, y=100)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
kpi = ia.KeypointsOnImage(keypoints=kps + [ia.Keypoint(x=5, y=5)], shape=(5, 5, 3))
image = np.zeros((5, 5, 3), dtype=np.uint8) + 10
kps_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
kps_mask[2, 1] = 1
kps_mask[4, 3] = 1
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
got_exception = False
try:
image_kps = kpi.draw_on_image(image, color=[0, 255, 0], size=1, copy=True, raise_if_out_of_image=True)
assert np.all(image_kps[kps_mask] == [0, 255, 0])
assert np.all(image_kps[~kps_mask] == [10, 10, 10])
except Exception:
got_exception = True
assert got_exception
# shift
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.shift(x=0, y=0)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(x=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x - 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y
assert kpi2.keypoints[1].x == kpi.keypoints[1].x - 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y
kpi2 = kpi.shift(y=1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 1
kpi2 = kpi.shift(y=-1)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x
assert kpi2.keypoints[0].y == kpi.keypoints[0].y - 1
assert kpi2.keypoints[1].x == kpi.keypoints[1].x
assert kpi2.keypoints[1].y == kpi.keypoints[1].y - 1
kpi2 = kpi.shift(x=1, y=2)
assert kpi2.keypoints[0].x == kpi.keypoints[0].x + 1
assert kpi2.keypoints[0].y == kpi.keypoints[0].y + 2
assert kpi2.keypoints[1].x == kpi.keypoints[1].x + 1
assert kpi2.keypoints[1].y == kpi.keypoints[1].y + 2
# get_coords_array
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
observed = kpi.get_coords_array()
expected = np.float32([
[1, 2],
[3, 4]
])
assert np.allclose(observed, expected)
# from_coords_array
arr = np.float32([
[1, 2],
[3, 4]
])
kpi = ia.KeypointsOnImage.from_coords_array(arr, shape=(5, 5, 3))
assert 1 - eps < kpi.keypoints[0].x < 1 + eps
assert 2 - eps < kpi.keypoints[0].y < 2 + eps
assert 3 - eps < kpi.keypoints[1].x < 3 + eps
assert 4 - eps < kpi.keypoints[1].y < 4 + eps
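# round-trip sketch: get_coords_array() followed by from_coords_array()
# should reproduce the keypoints (assuming both use the same (x, y)
# column order, as the two tests above suggest)
kpi_rt = ia.KeypointsOnImage.from_coords_array(kpi.get_coords_array(), shape=(5, 5, 3))
assert np.allclose(kpi.get_coords_array(), kpi_rt.get_coords_array())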
# to_keypoint_image
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
image = kpi.to_keypoint_image(size=1)
image_size3 = kpi.to_keypoint_image(size=3)
kps_mask = np.zeros((5, 5, 2), dtype=np.bool_)
kps_mask[2, 1, 0] = 1
kps_mask[4, 3, 1] = 1
kps_mask_size3 = np.zeros_like(kps_mask)
kps_mask_size3[2-1:2+1+1, 1-1:1+1+1, 0] = 1
kps_mask_size3[4-1:4+1+1, 3-1:3+1+1, 1] = 1
assert np.all(image[kps_mask] == 255)
assert np.all(image[~kps_mask] == 0)
assert np.all(image_size3[kps_mask] == 255)
assert np.all(image_size3[kps_mask_size3] >= 128)
assert np.all(image_size3[~kps_mask_size3] == 0)
# from_keypoint_image()
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 255
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == 4
assert kpi2.keypoints[1].x == 3
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords={"x": -1, "y": -2}, threshold=20,
nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=(-1, -2), threshold=20,
nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 2
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[1].y == -2
assert kpi2.keypoints[1].x == -1
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
kpi2 = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords=None, threshold=20, nb_channels=3)
assert kpi2.shape == (5, 5, 3)
assert len(kpi2.keypoints) == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[0].x == 1
got_exception = False
try:
kps_image = np.zeros((5, 5, 2), dtype=np.uint8)
kps_image[2, 1, 0] = 255
kps_image[4, 3, 1] = 10
_ = ia.KeypointsOnImage.from_keypoint_image(kps_image, if_not_found_coords="exception-please", threshold=20,
nb_channels=3)
except Exception as exc:
assert "Expected if_not_found_coords to be" in str(exc)
got_exception = True
assert got_exception
# copy()
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.copy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 100
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# deepcopy()
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
kpi2 = kpi.deepcopy()
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
kps[0].x = 100
assert kpi2.keypoints[0].x == 1
assert kpi2.keypoints[0].y == 2
assert kpi2.keypoints[1].x == 3
assert kpi2.keypoints[1].y == 4
# repr/str
kps = [ia.Keypoint(x=1, y=2), ia.Keypoint(x=3, y=4)]
kpi = ia.KeypointsOnImage(keypoints=kps, shape=(5, 5, 3))
expected = "KeypointsOnImage([Keypoint(x=1.00000000, y=2.00000000), Keypoint(x=3.00000000, y=4.00000000)], " \
+ "shape=(5, 5, 3))"
assert kpi.__repr__() == kpi.__str__() == expected
def test_BoundingBox():
eps = 1e-8
# properties with ints
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
assert bb.width == 40 - 20
assert bb.height == 30 - 10
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# wrong order of y1/y2, x1/x2
bb = ia.BoundingBox(y1=30, x1=40, y2=10, x2=20, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 30
assert bb.x2_int == 40
# properties with floats
bb = ia.BoundingBox(y1=10.1, x1=20.1, y2=30.9, x2=40.9, label=None)
assert bb.y1_int == 10
assert bb.x1_int == 20
assert bb.y2_int == 31
assert bb.x2_int == 41
assert bb.width == 40.9 - 20.1
assert bb.height == 30.9 - 10.1
center_x = bb.x1 + (bb.x2 - bb.x1)/2
center_y = bb.y1 + (bb.y2 - bb.y1)/2
assert center_x - eps < bb.center_x < center_x + eps
assert center_y - eps < bb.center_y < center_y + eps
# area
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.area == (30-10) * (40-20)
# project
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (10, 10))
assert 10 - eps < bb2.y1 < 10 + eps
assert 20 - eps < bb2.x1 < 20 + eps
assert 30 - eps < bb2.y2 < 30 + eps
assert 40 - eps < bb2.x2 < 40 + eps
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.project((10, 10), (20, 20))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (5, 5))
assert 10*0.5 - eps < bb2.y1 < 10*0.5 + eps
assert 20*0.5 - eps < bb2.x1 < 20*0.5 + eps
assert 30*0.5 - eps < bb2.y2 < 30*0.5 + eps
assert 40*0.5 - eps < bb2.x2 < 40*0.5 + eps
bb2 = bb.project((10, 10), (10, 20))
assert 10*1 - eps < bb2.y1 < 10*1 + eps
assert 20*2 - eps < bb2.x1 < 20*2 + eps
assert 30*1 - eps < bb2.y2 < 30*1 + eps
assert 40*2 - eps < bb2.x2 < 40*2 + eps
bb2 = bb.project((10, 10), (20, 10))
assert 10*2 - eps < bb2.y1 < 10*2 + eps
assert 20*1 - eps < bb2.x1 < 20*1 + eps
assert 30*2 - eps < bb2.y2 < 30*2 + eps
assert 40*1 - eps < bb2.x2 < 40*1 + eps
# extend
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = bb.extend(all_sides=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+1
assert bb2.x1 == 20-1
assert bb2.x2 == 40+1
bb2 = bb.extend(all_sides=-1)
assert bb2.y1 == 10-(-1)
assert bb2.y2 == 30+(-1)
assert bb2.x1 == 20-(-1)
assert bb2.x2 == 40+(-1)
bb2 = bb.extend(top=1)
assert bb2.y1 == 10-1
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(right=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-0
assert bb2.x2 == 40+1
bb2 = bb.extend(bottom=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+1
assert bb2.x1 == 20-0
assert bb2.x2 == 40+0
bb2 = bb.extend(left=1)
assert bb2.y1 == 10-0
assert bb2.y2 == 30+0
assert bb2.x1 == 20-1
assert bb2.x2 == 40+0
# intersection
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_inter = bb1.intersection(bb2)
assert bb_inter.x1 == 39
assert bb_inter.x2 == 40
assert bb_inter.y1 == 10
assert bb_inter.y2 == 30
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
bb_inter = bb1.intersection(bb2, default=False)
assert bb_inter is False
# union
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=39, y2=30, x2=59, label=None)
bb_union = bb1.union(bb2)
assert bb_union.x1 == 20
assert bb_union.x2 == 59
assert bb_union.y1 == 10
assert bb_union.y2 == 30
# iou
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
iou = bb1.iou(bb2)
assert 1.0 - eps < iou < 1.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=10, x1=41, y2=30, x2=61, label=None)
iou = bb1.iou(bb2)
assert 0.0 - eps < iou < 0.0 + eps
bb1 = ia.BoundingBox(y1=10, x1=10, y2=20, x2=20, label=None)
bb2 = ia.BoundingBox(y1=15, x1=15, y2=25, x2=25, label=None)
iou = bb1.iou(bb2)
area_union = 10 * 10 + 10 * 10 - 5 * 5
area_intersection = 5 * 5
iou_expected = area_intersection / area_union
assert iou_expected - eps < iou < iou_expected + eps
# is_fully_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_fully_within_image((100, 100, 3)) is True
assert bb.is_fully_within_image((20, 100, 3)) is False
assert bb.is_fully_within_image((100, 30, 3)) is False
assert bb.is_fully_within_image((1, 1, 3)) is False
# is_partly_within_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_partly_within_image((100, 100, 3)) is True
assert bb.is_partly_within_image((20, 100, 3)) is True
assert bb.is_partly_within_image((100, 30, 3)) is True
assert bb.is_partly_within_image((1, 1, 3)) is False
# is_out_of_image()
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=True) is False
assert bb.is_out_of_image((100, 100, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((100, 100, 3), partly=True, fully=False) is False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((20, 100, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((20, 100, 3), partly=True, fully=False) is True
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((100, 30, 3), partly=False, fully=True) is False
assert bb.is_out_of_image((100, 30, 3), partly=True, fully=False) is True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=True) is True
assert bb.is_out_of_image((1, 1, 3), partly=False, fully=True) is True
assert bb.is_out_of_image((1, 1, 3), partly=True, fully=False) is False
# cut_out_of_image
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_cut = bb.cut_out_of_image((100, 100, 3))
eps = np.finfo(np.float32).eps
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image(np.zeros((100, 100, 3), dtype=np.uint8))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((20, 100, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert 20 - 2*eps < bb_cut.y2 < 20
assert bb_cut.x2 == 40
bb_cut = bb.cut_out_of_image((100, 30, 3))
assert bb_cut.y1 == 10
assert bb_cut.x1 == 20
assert bb_cut.y2 == 30
assert 30 - 2*eps < bb_cut.x2 < 30
# shift
bb = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb_top = bb.shift(top=0)
bb_right = bb.shift(right=0)
bb_bottom = bb.shift(bottom=0)
bb_left = bb.shift(left=0)
assert bb_top.y1 == 10
assert bb_top.x1 == 20
assert bb_top.y2 == 30
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20
assert bb_right.y2 == 30
assert bb_right.x2 == 40
assert bb_bottom.y1 == 10
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20
assert bb_left.y2 == 30
assert bb_left.x2 == 40
bb_top = bb.shift(top=1)
bb_right = bb.shift(right=1)
bb_bottom = bb.shift(bottom=1)
bb_left = bb.shift(left=1)
assert bb_top.y1 == 10+1
assert bb_top.x1 == 20
assert bb_top.y2 == 30+1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20-1
assert bb_right.y2 == 30
assert bb_right.x2 == 40-1
assert bb_bottom.y1 == 10-1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30-1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20+1
assert bb_left.y2 == 30
assert bb_left.x2 == 40+1
bb_top = bb.shift(top=-1)
bb_right = bb.shift(right=-1)
bb_bottom = bb.shift(bottom=-1)
bb_left = bb.shift(left=-1)
assert bb_top.y1 == 10-1
assert bb_top.x1 == 20
assert bb_top.y2 == 30-1
assert bb_top.x2 == 40
assert bb_right.y1 == 10
assert bb_right.x1 == 20+1
assert bb_right.y2 == 30
assert bb_right.x2 == 40+1
assert bb_bottom.y1 == 10+1
assert bb_bottom.x1 == 20
assert bb_bottom.y2 == 30+1
assert bb_bottom.x2 == 40
assert bb_left.y1 == 10
assert bb_left.x1 == 20-1
assert bb_left.y2 == 30
assert bb_left.x2 == 40-1
bb_mix = bb.shift(top=1, bottom=2, left=3, right=4)
assert bb_mix.y1 == 10+1-2
assert bb_mix.x1 == 20+3-4
assert bb_mix.y2 == 30+1-2
assert bb_mix.x2 == 40+3-4
# draw_on_image()
image = np.zeros((10, 10, 3), dtype=np.uint8)
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
bb_mask[1:3+1, 1] = True
bb_mask[1:3+1, 3] = True
bb_mask[1, 1:3+1] = True
bb_mask[3, 1:3+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image == 0)
image_bb = bb.draw_on_image(image, color=[255, 0, 0], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 0, 0])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image, color=128, alpha=1.0, thickness=1, copy=True, raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [128, 128, 128])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
image_bb = bb.draw_on_image(image+100, color=[200, 200, 200], alpha=0.5, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [150, 150, 150])
assert np.all(image_bb[~bb_mask] == [100, 100, 100])
image_bb = bb.draw_on_image((image+100).astype(np.float32), color=[200, 200, 200], alpha=0.5, thickness=1,
copy=True, raise_if_out_of_image=False)
assert np.sum(np.abs((image_bb - [150, 150, 150])[bb_mask])) < 0.1
assert np.sum(np.abs((image_bb - [100, 100, 100])[~bb_mask])) < 0.1
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=False,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
assert np.all(image[bb_mask] == [255, 255, 255])
assert np.all(image[~bb_mask] == [0, 0, 0])
image = np.zeros_like(image)
bb = ia.BoundingBox(y1=-1, x1=-1, y2=2, x2=2, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
bb_mask[2, 0:3] = True
bb_mask[0:3, 2] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=1, x1=1, y2=3, x2=3, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
bb_mask[0:5, 0:5] = True
bb_mask[2, 2] = False
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=2, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
bb_mask = np.zeros(image.shape[0:2], dtype=np.bool_)
bb_mask[0:1+1, 1] = True
bb_mask[1, 0:1+1] = True
image_bb = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=False)
assert np.all(image_bb[bb_mask] == [255, 255, 255])
assert np.all(image_bb[~bb_mask] == [0, 0, 0])
bb = ia.BoundingBox(y1=-1, x1=-1, y2=1, x2=1, label=None)
got_exception = False
try:
_ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=True)
except Exception:
got_exception = True
assert got_exception is False
bb = ia.BoundingBox(y1=-5, x1=-5, y2=-1, x2=-1, label=None)
got_exception = False
try:
_ = bb.draw_on_image(image, color=[255, 255, 255], alpha=1.0, thickness=1, copy=True,
raise_if_out_of_image=True)
except Exception:
got_exception = True
assert got_exception is True
# extract_from_image()
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10))
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image[1:3, 1:3])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((0, 1), (0, 1), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=8, y2=11, x1=8, x2=11, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[8:11, 8:11, :])
image = np.random.RandomState(1234).randint(0, 255, size=(10, 10, 3))
image_pad = np.pad(image, ((1, 0), (1, 0), (0, 0)), mode="constant", constant_values=0)
bb = ia.BoundingBox(y1=-1, y2=3, x1=-1, x2=4, label=None)
image_sub = bb.extract_from_image(image)
assert np.array_equal(image_sub, image_pad[0:4, 0:5, :])
# to_keypoints()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
kps = bb.to_keypoints()
assert kps[0].y == 1
assert kps[0].x == 1
assert kps[1].y == 1
assert kps[1].x == 3
assert kps[2].y == 3
assert kps[2].x == 3
assert kps[3].y == 3
assert kps[3].x == 1
# copy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label="test")
bb2 = bb.copy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label == "test"
bb2 = bb.copy(y1=10, x1=20, y2=30, x2=40, label="test2")
assert bb2.y1 == 10
assert bb2.x1 == 20
assert bb2.y2 == 30
assert bb2.x2 == 40
assert bb2.label == "test2"
# deepcopy()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=["test"])
bb2 = bb.deepcopy()
assert bb2.y1 == 1
assert bb2.y2 == 3
assert bb2.x1 == 1
assert bb2.x2 == 3
assert bb2.label[0] == "test"
# __repr__()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__repr__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
# __str__()
bb = ia.BoundingBox(y1=1, y2=3, x1=1, x2=3, label=None)
assert bb.__str__() == "BoundingBox(x1=1.0000, y1=1.0000, x2=3.0000, y2=3.0000, label=None)"
def test_BoundingBoxesOnImage():
reseed()
# test height/width
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
assert bbsoi.height == 40
assert bbsoi.width == 50
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
assert bbsoi.height == 40
assert bbsoi.width == 50
# on()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=np.zeros((40, 50, 3), dtype=np.uint8))
bbsoi_projected = bbsoi.on((40, 50))
assert bbsoi_projected.bounding_boxes[0].y1 == 10
assert bbsoi_projected.bounding_boxes[0].x1 == 20
assert bbsoi_projected.bounding_boxes[0].y2 == 30
assert bbsoi_projected.bounding_boxes[0].x2 == 40
assert bbsoi_projected.bounding_boxes[1].y1 == 15
assert bbsoi_projected.bounding_boxes[1].x1 == 25
assert bbsoi_projected.bounding_boxes[1].y2 == 35
assert bbsoi_projected.bounding_boxes[1].x2 == 45
bbsoi_projected = bbsoi.on((40*2, 50*2, 3))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
bbsoi_projected = bbsoi.on(np.zeros((40*2, 50*2, 3), dtype=np.uint8))
assert bbsoi_projected.bounding_boxes[0].y1 == 10*2
assert bbsoi_projected.bounding_boxes[0].x1 == 20*2
assert bbsoi_projected.bounding_boxes[0].y2 == 30*2
assert bbsoi_projected.bounding_boxes[0].x2 == 40*2
assert bbsoi_projected.bounding_boxes[1].y1 == 15*2
assert bbsoi_projected.bounding_boxes[1].x1 == 25*2
assert bbsoi_projected.bounding_boxes[1].y2 == 35*2
assert bbsoi_projected.bounding_boxes[1].x2 == 45*2
# draw_on_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=45, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
image = bbsoi.draw_on_image(np.zeros(bbsoi.shape, dtype=np.uint8), color=[0, 255, 0], alpha=1.0, thickness=1,
copy=True, raise_if_out_of_image=False)
assert np.all(image[10-1, 20-1, :] == [0, 0, 0])
assert np.all(image[10-1, 20-0, :] == [0, 0, 0])
assert np.all(image[10-0, 20-1, :] == [0, 0, 0])
assert np.all(image[10-0, 20-0, :] == [0, 255, 0])
assert np.all(image[10+1, 20+1, :] == [0, 0, 0])
assert np.all(image[30-1, 40-1, :] == [0, 0, 0])
assert np.all(image[30+1, 40-0, :] == [0, 0, 0])
assert np.all(image[30+0, 40+1, :] == [0, 0, 0])
assert np.all(image[30+0, 40+0, :] == [0, 255, 0])
assert np.all(image[30+1, 40+1, :] == [0, 0, 0])
assert np.all(image[15-1, 25-1, :] == [0, 0, 0])
assert np.all(image[15-1, 25-0, :] == [0, 0, 0])
assert np.all(image[15-0, 25-1, :] == [0, 0, 0])
assert np.all(image[15-0, 25-0, :] == [0, 255, 0])
assert np.all(image[15+1, 25+1, :] == [0, 0, 0])
assert np.all(image[35-1, 45-1, :] == [0, 0, 0])
assert np.all(image[35+1, 45+0, :] == [0, 0, 0])
assert np.all(image[35+0, 45+1, :] == [0, 0, 0])
assert np.all(image[35+0, 45+0, :] == [0, 255, 0])
assert np.all(image[35+1, 45+1, :] == [0, 0, 0])
# remove_out_of_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_slim = bbsoi.remove_out_of_image(fully=True, partly=True)
assert len(bbsoi_slim.bounding_boxes) == 1
assert bbsoi_slim.bounding_boxes[0] == bb1
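# hedged sketch of the complementary case: with partly=False, bb2 (which is
# only partly outside the (40, 50) image) is assumed to be kept
bbsoi_kept = bbsoi.remove_out_of_image(fully=True, partly=False)
assert len(bbsoi_kept.bounding_boxes) == 2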
# cut_out_of_image()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
eps = np.finfo(np.float32).eps
bbsoi_cut = bbsoi.cut_out_of_image()
assert len(bbsoi_cut.bounding_boxes) == 2
assert bbsoi_cut.bounding_boxes[0].y1 == 10
assert bbsoi_cut.bounding_boxes[0].x1 == 20
assert bbsoi_cut.bounding_boxes[0].y2 == 30
assert bbsoi_cut.bounding_boxes[0].x2 == 40
assert bbsoi_cut.bounding_boxes[1].y1 == 15
assert bbsoi_cut.bounding_boxes[1].x1 == 25
assert bbsoi_cut.bounding_boxes[1].y2 == 35
assert 50 - 2*eps < bbsoi_cut.bounding_boxes[1].x2 < 50
# shift()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_shifted = bbsoi.shift(right=1)
assert len(bbsoi_shifted.bounding_boxes) == 2
assert bbsoi_shifted.bounding_boxes[0].y1 == 10
assert bbsoi_shifted.bounding_boxes[0].x1 == 20 - 1
assert bbsoi_shifted.bounding_boxes[0].y2 == 30
assert bbsoi_shifted.bounding_boxes[0].x2 == 40 - 1
assert bbsoi_shifted.bounding_boxes[1].y1 == 15
assert bbsoi_shifted.bounding_boxes[1].x1 == 25 - 1
assert bbsoi_shifted.bounding_boxes[1].y2 == 35
assert bbsoi_shifted.bounding_boxes[1].x2 == 51 - 1
# copy()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.copy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi.bounding_boxes[0].y1 = 0
assert bbsoi_copy.bounding_boxes[0].y1 == 0
# deepcopy()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bbsoi_copy = bbsoi.deepcopy()
assert len(bbsoi.bounding_boxes) == 2
assert bbsoi_copy.bounding_boxes[0].y1 == 10
assert bbsoi_copy.bounding_boxes[0].x1 == 20
assert bbsoi_copy.bounding_boxes[0].y2 == 30
assert bbsoi_copy.bounding_boxes[0].x2 == 40
assert bbsoi_copy.bounding_boxes[1].y1 == 15
assert bbsoi_copy.bounding_boxes[1].x1 == 25
assert bbsoi_copy.bounding_boxes[1].y2 == 35
assert bbsoi_copy.bounding_boxes[1].x2 == 51
bbsoi.bounding_boxes[0].y1 = 0
assert bbsoi_copy.bounding_boxes[0].y1 == 10
# repr() / str()
bb1 = ia.BoundingBox(y1=10, x1=20, y2=30, x2=40, label=None)
bb2 = ia.BoundingBox(y1=15, x1=25, y2=35, x2=51, label=None)
bbsoi = ia.BoundingBoxesOnImage([bb1, bb2], shape=(40, 50, 3))
bb1_expected = "BoundingBox(x1=20.0000, y1=10.0000, x2=40.0000, y2=30.0000, label=None)"
bb2_expected = "BoundingBox(x1=25.0000, y1=15.0000, x2=51.0000, y2=35.0000, label=None)"
expected = "BoundingBoxesOnImage([%s, %s], shape=(40, 50, 3))" % (bb1_expected, bb2_expected)
assert bbsoi.__repr__() == bbsoi.__str__() == expected
def test_HeatmapsOnImage_draw():
heatmaps_arr = np.float32([
[0.5, 0.0, 0.0, 0.5],
[0.0, 1.0, 1.0, 0.0],
[0.0, 1.0, 1.0, 0.0],
[0.5, 0.0, 0.0, 0.5],
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_drawn = heatmaps.draw()[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 1]
v2 = heatmaps_drawn[0, 0]
v3 = heatmaps_drawn[1, 1]
for y, x in [(0, 1), (0, 2), (1, 0), (1, 3), (2, 0), (2, 3), (3, 1), (3, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v1)
for y, x in [(0, 0), (0, 3), (3, 0), (3, 3)]:
assert np.allclose(heatmaps_drawn[y, x], v2)
for y, x in [(1, 1), (1, 2), (2, 1), (2, 2)]:
assert np.allclose(heatmaps_drawn[y, x], v3)
# size differs from heatmap array size
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_drawn = heatmaps.draw(size=(4, 4))[0]
assert heatmaps_drawn.shape == (4, 4, 3)
v1 = heatmaps_drawn[0, 0]
v2 = heatmaps_drawn[0, -1]
for y in range(4):
for x in range(2):
assert np.allclose(heatmaps_drawn[y, x], v1)
for y in range(4):
for x in range(2, 4):
assert np.allclose(heatmaps_drawn[y, x], v2)
def test_HeatmapsOnImage_draw_on_image():
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
image = np.uint8([
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255],
[0, 0, 0, 255]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, cmap=None)[0]
assert heatmaps_drawn.shape == (4, 4, 3)
assert np.all(heatmaps_drawn[0:4, 0:2, :] == 0)
assert np.all(heatmaps_drawn[0:4, 2:3, :] == 128) or np.all(heatmaps_drawn[0:4, 2:3, :] == 127)
assert np.all(heatmaps_drawn[0:4, 3:4, :] == 255) or np.all(heatmaps_drawn[0:4, 3:4, :] == 254)
image = np.uint8([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]
])
image = np.tile(image[..., np.newaxis], (1, 1, 3))
heatmaps_drawn = heatmaps.draw_on_image(image, alpha=0.5, resize="image", cmap=None)[0]
assert heatmaps_drawn.shape == (2, 2, 3)
assert np.all(heatmaps_drawn[0:2, 0, :] == 0)
assert np.all(heatmaps_drawn[0:2, 1, :] == 128) or np.all(heatmaps_drawn[0:2, 1, :] == 127)
def test_HeatmapsOnImage_invert():
heatmaps_arr = np.float32([
[0.0, 5.0, 10.0],
[-1.0, -2.0, 7.5]
])
expected = np.float32([
[8.0, 3.0, -2.0],
[9.0, 10.0, 0.5]
])
# (H, W)
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 3), min_value=-2.0, max_value=10.0)
assert np.allclose(heatmaps.get_arr(), heatmaps_arr)
assert np.allclose(heatmaps.invert().get_arr(), expected)
# (H, W, 1)
heatmaps = ia.HeatmapsOnImage(heatmaps_arr[..., np.newaxis], shape=(2, 3), min_value=-2.0, max_value=10.0)
assert np.allclose(heatmaps.get_arr(), heatmaps_arr[..., np.newaxis])
assert np.allclose(heatmaps.invert().get_arr(), expected[..., np.newaxis])
def test_HeatmapsOnImage_pad():
heatmaps_arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(2, 2, 3))
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4)
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
])
)
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, cval=0.5)
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.0, 1.0, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
])
)
heatmaps_padded = heatmaps.pad(top=1, right=2, bottom=3, left=4, mode="edge")
assert heatmaps_padded.arr_0to1.shape == (2+(1+3), 2+(4+2), 1)
assert np.allclose(
heatmaps_padded.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0]
])
)
def test_HeatmapsOnImage_avg_pool():
heatmaps_arr = np.float32([
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_pooled = heatmaps.avg_pool(2)
assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1)
assert np.allclose(
heatmaps_pooled.arr_0to1[:, :, 0],
np.float32([[0.0, 0.75],
[0.0, 0.75]])
)
def test_HeatmapsOnImage_max_pool():
heatmaps_arr = np.float32([
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0],
[0.0, 0.0, 0.5, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_pooled = heatmaps.max_pool(2)
assert heatmaps_pooled.arr_0to1.shape == (2, 2, 1)
assert np.allclose(
heatmaps_pooled.arr_0to1[:, :, 0],
np.float32([[0.0, 1.0],
[0.0, 1.0]])
)
def test_HeatmapsOnImage_scale():
heatmaps_arr = np.float32([
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_scaled = heatmaps.scale((4, 4), interpolation="nearest")
assert heatmaps_scaled.arr_0to1.shape == (4, 4, 1)
assert heatmaps_scaled.arr_0to1.dtype.type == np.float32
assert np.allclose(
heatmaps_scaled.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
])
)
heatmaps_arr = np.float32([
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps_arr, shape=(4, 4, 3))
heatmaps_scaled = heatmaps.scale(2.0, interpolation="nearest")
assert heatmaps_scaled.arr_0to1.shape == (2, 4, 1)
assert heatmaps_scaled.arr_0to1.dtype.type == np.float32
assert np.allclose(
heatmaps_scaled.arr_0to1[:, :, 0],
np.float32([
[0.0, 0.0, 1.0, 1.0],
[0.0, 0.0, 1.0, 1.0]
])
)
def test_SegmentationMapOnImage_bool():
# Test for #189 (boolean mask inputs into SegmentationMapOnImage not working)
arr = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=bool)
assert arr.dtype.type == np.bool_
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
observed = segmap.get_arr_int()
assert observed.dtype.type == np.int32
assert np.array_equal(arr, observed)
arr = np.array([
[0, 0, 0],
[0, 1, 0],
[0, 0, 0]
], dtype=np.bool_)
assert arr.dtype.type == np.bool_
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
observed = segmap.get_arr_int()
assert observed.dtype.type == np.int32
assert np.array_equal(arr, observed)
def test_SegmentationMapOnImage_get_arr_int():
arr = np.int32([
[0, 0, 1],
[0, 2, 1],
[1, 3, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=4)
observed = segmap.get_arr_int()
assert observed.dtype.type == np.int32
assert np.array_equal(arr, observed)
arr_c0 = np.float32([
[0.1, 0.1, 0.1],
[0.1, 0.9, 0.1],
[0.0, 0.1, 0.0]
])
arr_c1 = np.float32([
[0.2, 1.0, 0.2],
[0.2, 0.8, 0.2],
[0.0, 0.0, 0.0]
])
arr_c2 = np.float32([
[0.0, 0.0, 0.0],
[0.3, 0.7, 0.3],
[0.1, 0.0, 0.0001]
])
arr = np.concatenate([
arr_c0[..., np.newaxis],
arr_c1[..., np.newaxis],
arr_c2[..., np.newaxis]
], axis=2)
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
observed = segmap.get_arr_int()
expected = np.int32([
[2, 2, 2],
[3, 1, 3],
[3, 1, 0]
])
assert observed.dtype.type == np.int32
assert np.array_equal(observed, expected)
got_exception = False
try:
_ = segmap.get_arr_int(background_class_id=2)
except Exception as exc:
assert "The background class id may only be changed if " in str(exc)
got_exception = True
assert got_exception
observed = segmap.get_arr_int(background_threshold=0.21)
expected = np.int32([
[0, 2, 0],
[3, 1, 3],
[0, 0, 0]
])
assert observed.dtype.type == np.int32
assert np.array_equal(observed, expected)
def test_SegmentationMapOnImage_draw():
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
# simple example with 2 classes
observed = segmap.draw()
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
assert np.array_equal(observed, expected)
# same example, with resizing to 2x the size
observed = segmap.draw(size=(6, 6))
expected = ia.imresize_single_image(expected, (6, 6), interpolation="nearest")
assert np.array_equal(observed, expected)
# custom choice of colors
col0 = (10, 10, 10)
col1 = (50, 51, 52)
observed = segmap.draw(colors=[col0, col1])
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
assert np.array_equal(observed, expected)
# background_threshold, background_class and foreground mask
arr_c0 = np.float32([
[0, 0, 0],
[1.0, 0, 0],
[0, 0, 0]
])
arr_c1 = np.float32([
[0, 1, 1],
[0, 1, 1],
[0.1, 1, 1]
])
arr = np.concatenate([
arr_c0[..., np.newaxis],
arr_c1[..., np.newaxis]
], axis=2)
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3))
observed, observed_fg = segmap.draw(background_threshold=0.01, return_foreground_mask=True)
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2]
expected = np.uint8([
[col0, col2, col2],
[col1, col2, col2],
[col2, col2, col2]
])
expected_fg = np.array([
[False, True, True],
[True, True, True],
[True, True, True]
], dtype=np.bool_)
assert np.array_equal(observed, expected)
assert np.array_equal(observed_fg, expected_fg)
# background_threshold, background_class and foreground mask
# here with higher threshold so that bottom left pixel switches to background
observed, observed_fg = segmap.draw(background_threshold=0.11, return_foreground_mask=True)
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
col2 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[2]
expected = np.uint8([
[col0, col2, col2],
[col1, col2, col2],
[col0, col2, col2]
])
expected_fg = np.array([
[False, True, True],
[True, True, True],
[False, True, True]
], dtype=np.bool_)
assert np.array_equal(observed, expected)
assert np.array_equal(observed_fg, expected_fg)
def test_SegmentationMapOnImage_draw_on_image():
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
image = np.uint8([
[0, 10, 20],
[30, 40, 50],
[60, 70, 80]
])
image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
# only image visible
observed = segmap.draw_on_image(image, alpha=0)
assert np.array_equal(observed, image)
# only segmap visible
observed = segmap.draw_on_image(image, alpha=1.0, draw_background=True)
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
assert np.array_equal(observed, expected)
# only segmap visible - in foreground
observed = segmap.draw_on_image(image, alpha=1.0, draw_background=False)
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
expected = np.uint8([
[image[0, 0, :], col1, col1],
[image[1, 0, :], col1, col1],
[image[2, 0, :], col1, col1]
])
assert np.array_equal(observed, expected)
# overlay without background drawn
a1 = 0.7
a0 = 1.0 - a1
observed = segmap.draw_on_image(image, alpha=a1, draw_background=False)
col1 = np.uint8(ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1])
expected = np.float32([
[image[0, 0, :], a0*image[0, 1, :] + a1*col1, a0*image[0, 2, :] + a1*col1],
[image[1, 0, :], a0*image[1, 1, :] + a1*col1, a0*image[1, 2, :] + a1*col1],
[image[2, 0, :], a0*image[2, 1, :] + a1*col1, a0*image[2, 2, :] + a1*col1]
])
d_max = np.max(np.abs(observed.astype(np.float32) - expected))
assert observed.shape == expected.shape
assert d_max <= 1.0 + 1e-4
# overlay with background drawn
a1 = 0.7
a0 = 1.0 - a1
observed = segmap.draw_on_image(image, alpha=a1, draw_background=True)
col0 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[0]
col1 = ia.SegmentationMapOnImage.DEFAULT_SEGMENT_COLORS[1]
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
expected = a0 * image + a1 * expected
d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
assert observed.shape == expected.shape
assert d_max <= 1.0 + 1e-4
# resizing of segmap to image
arr = np.int32([
[0, 1, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=2)
image = np.uint8([
[0, 10, 20],
[30, 40, 50],
[60, 70, 80]
])
image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
a1 = 0.7
a0 = 1.0 - a1
observed = segmap.draw_on_image(image, alpha=a1, draw_background=True, resize="segmentation_map")
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
expected = a0 * image + a1 * expected
d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
assert observed.shape == expected.shape
assert d_max <= 1.0 + 1e-4
# resizing of image to segmap
arr = np.int32([
[0, 1, 1],
[0, 1, 1],
[0, 1, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(1, 3), nb_classes=2)
image = np.uint8([
[0, 10, 20]
])
image = np.tile(image[:, :, np.newaxis], (1, 1, 3))
image_rs = ia.imresize_single_image(image, arr.shape[0:2], interpolation="cubic")
a1 = 0.7
a0 = 1.0 - a1
observed = segmap.draw_on_image(image, alpha=a1, draw_background=True, resize="image")
expected = np.uint8([
[col0, col1, col1],
[col0, col1, col1],
[col0, col1, col1]
])
expected = a0 * image_rs + a1 * expected
d_max = np.max(np.abs(observed.astype(np.float32) - expected.astype(np.float32)))
assert observed.shape == expected.shape
assert d_max <= 1.0 + 1e-4
def test_SegmentationMapOnImage_pad():
arr = np.int32([
[0, 1, 1],
[0, 2, 1],
[0, 1, 3]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(3, 3), nb_classes=4)
segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4)
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 3), (4, 2), (0, 0)), mode="constant", constant_values=0)
assert np.allclose(observed, expected)
segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4, cval=1.0)
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 3), (4, 2), (0, 0)), mode="constant", constant_values=1.0)
assert np.allclose(observed, expected)
segmap_padded = segmap.pad(top=1, right=2, bottom=3, left=4, mode="edge")
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 3), (4, 2), (0, 0)), mode="edge")
assert np.allclose(observed, expected)
def test_SegmentationMapOnImage_pad_to_aspect_ratio():
arr = np.int32([
[0, 1, 1],
[0, 2, 1]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 3), nb_classes=3)
segmap_padded = segmap.pad_to_aspect_ratio(1.0)
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 0), (0, 0), (0, 0)), mode="constant", constant_values=0)
assert np.allclose(observed, expected)
segmap_padded = segmap.pad_to_aspect_ratio(1.0, cval=1.0)
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 0), (0, 0), (0, 0)), mode="constant", constant_values=1.0)
assert np.allclose(observed, expected)
segmap_padded = segmap.pad_to_aspect_ratio(1.0, mode="edge")
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((1, 0), (0, 0), (0, 0)), mode="edge")
assert np.allclose(observed, expected)
segmap_padded = segmap.pad_to_aspect_ratio(0.5)
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((2, 2), (0, 0), (0, 0)), mode="constant", constant_values=0)
assert np.allclose(observed, expected)
segmap_padded, pad_amounts = segmap.pad_to_aspect_ratio(0.5, return_pad_amounts=True)
observed = segmap_padded.arr
expected = np.pad(segmap.arr, ((2, 2), (0, 0), (0, 0)), mode="constant", constant_values=0)
assert np.allclose(observed, expected)
assert pad_amounts == (2, 0, 2, 0)
def test_SegmentationMapOnImage_scale():
arr = np.int32([
[0, 1],
[0, 2]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
segmap_scaled = segmap.scale((4, 4))
observed = segmap_scaled.arr
expected = np.clip(ia.imresize_single_image(segmap.arr, (4, 4), interpolation="cubic"), 0, 1.0)
assert np.allclose(observed, expected)
assert np.array_equal(segmap_scaled.get_arr_int(), np.int32([
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 2, 2],
[0, 0, 2, 2],
]))
segmap_scaled = segmap.scale((4, 4), interpolation="nearest")
observed = segmap_scaled.arr
expected = ia.imresize_single_image(segmap.arr, (4, 4), interpolation="nearest")
assert np.allclose(observed, expected)
assert np.array_equal(segmap_scaled.get_arr_int(), np.int32([
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 2, 2],
[0, 0, 2, 2],
]))
segmap_scaled = segmap.scale(2.0)
observed = segmap_scaled.arr
expected = np.clip(ia.imresize_single_image(segmap.arr, 2.0, interpolation="cubic"), 0, 1.0)
assert np.allclose(observed, expected)
assert np.array_equal(segmap_scaled.get_arr_int(), np.int32([
[0, 0, 1, 1],
[0, 0, 1, 1],
[0, 0, 2, 2],
[0, 0, 2, 2],
]))
def test_SegmentationMapOnImage_to_heatmaps():
arr = np.int32([
[0, 1],
[0, 2]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
heatmaps = segmap.to_heatmaps()
expected_c0 = np.float32([
[1.0, 0.0],
[1.0, 0.0]
])
expected_c1 = np.float32([
[0.0, 1.0],
[0.0, 0.0]
])
expected_c2 = np.float32([
[0.0, 0.0],
[0.0, 1.0]
])
expected = np.concatenate([
expected_c0[..., np.newaxis],
expected_c1[..., np.newaxis],
expected_c2[..., np.newaxis]
], axis=2)
assert np.allclose(heatmaps.arr_0to1, expected)
# only_nonempty when all are nonempty
heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
expected_c0 = np.float32([
[1.0, 0.0],
[1.0, 0.0]
])
expected_c1 = np.float32([
[0.0, 1.0],
[0.0, 0.0]
])
expected_c2 = np.float32([
[0.0, 0.0],
[0.0, 1.0]
])
expected = np.concatenate([
expected_c0[..., np.newaxis],
expected_c1[..., np.newaxis],
expected_c2[..., np.newaxis]
], axis=2)
assert np.allclose(heatmaps.arr_0to1, expected)
assert len(class_indices) == 3
    assert all([idx in class_indices for idx in [0, 1, 2]])
# only_nonempty when one is empty and two are nonempty
arr = np.int32([
[0, 2],
[0, 2]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
expected_c0 = np.float32([
[1.0, 0.0],
[1.0, 0.0]
])
expected_c2 = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
expected = np.concatenate([
expected_c0[..., np.newaxis],
expected_c2[..., np.newaxis]
], axis=2)
assert np.allclose(heatmaps.arr_0to1, expected)
assert len(class_indices) == 2
    assert all([idx in class_indices for idx in [0, 2]])
# only_nonempty when all are empty
arr_c0 = np.float32([
[0.0, 0.0],
[0.0, 0.0]
])
arr = arr_c0[..., np.newaxis]
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True)
assert heatmaps is None
assert len(class_indices) == 0
# only_nonempty when all are empty and not_none_if_no_nonempty is True
arr_c0 = np.float32([
[0.0, 0.0],
[0.0, 0.0]
])
arr = arr_c0[..., np.newaxis]
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=3)
heatmaps, class_indices = segmap.to_heatmaps(only_nonempty=True, not_none_if_no_nonempty=True)
assert np.allclose(heatmaps.arr_0to1, np.zeros((2, 2), dtype=np.float32))
assert len(class_indices) == 1
    assert all([idx in class_indices for idx in [0]])
def test_SegmentationMapOnImage_from_heatmaps():
arr_c0 = np.float32([
[1.0, 0.0],
[1.0, 0.0]
])
arr_c1 = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
arr = np.concatenate([arr_c0[..., np.newaxis], arr_c1[..., np.newaxis]], axis=2)
heatmaps = ia.HeatmapsOnImage.from_0to1(arr, shape=(2, 2))
segmap = ia.SegmentationMapOnImage.from_heatmaps(heatmaps)
assert np.allclose(segmap.arr, arr)
# with class_indices
arr_c0 = np.float32([
[1.0, 0.0],
[1.0, 0.0]
])
arr_c2 = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
arr = np.concatenate([arr_c0[..., np.newaxis], arr_c2[..., np.newaxis]], axis=2)
heatmaps = ia.HeatmapsOnImage.from_0to1(arr, shape=(2, 2))
segmap = ia.SegmentationMapOnImage.from_heatmaps(heatmaps, class_indices=[0, 2], nb_classes=4)
expected_c0 = np.copy(arr_c0)
expected_c1 = np.zeros(arr_c0.shape)
expected_c2 = np.copy(arr_c2)
expected_c3 = np.zeros(arr_c0.shape)
expected = np.concatenate([
expected_c0[..., np.newaxis],
expected_c1[..., np.newaxis],
expected_c2[..., np.newaxis],
expected_c3[..., np.newaxis]
], axis=2)
assert np.allclose(segmap.arr, expected)
def test_SegmentationMapOnImage_copy():
arr_c0 = np.float32([
[1.0, 0.0],
[1.0, 0.0]
])
arr_c1 = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
arr = np.concatenate([arr_c0[..., np.newaxis], arr_c1[..., np.newaxis]], axis=2)
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2))
observed = segmap.copy()
assert np.allclose(observed.arr, segmap.arr)
assert observed.shape == (2, 2)
assert observed.nb_classes == segmap.nb_classes
assert observed.input_was == segmap.input_was
arr = np.int32([
[0, 1],
[2, 3]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=10)
observed = segmap.copy()
assert np.array_equal(observed.get_arr_int(), arr)
assert observed.shape == (2, 2)
assert observed.nb_classes == 10
assert observed.input_was == segmap.input_was
def test_SegmentationMapOnImage_deepcopy():
arr_c0 = np.float32([
[1.0, 0.0],
[1.0, 0.0]
])
arr_c1 = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
arr = np.concatenate([arr_c0[..., np.newaxis], arr_c1[..., np.newaxis]], axis=2)
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2))
observed = segmap.deepcopy()
assert np.allclose(observed.arr, segmap.arr)
assert observed.shape == (2, 2)
assert observed.nb_classes == segmap.nb_classes
assert observed.input_was == segmap.input_was
segmap.arr[0, 0, 0] = 0.0
assert not np.allclose(observed.arr, segmap.arr)
arr = np.int32([
[0, 1],
[2, 3]
])
segmap = ia.SegmentationMapOnImage(arr, shape=(2, 2), nb_classes=10)
observed = segmap.deepcopy()
assert np.array_equal(observed.get_arr_int(), segmap.get_arr_int())
assert observed.shape == (2, 2)
assert observed.nb_classes == 10
assert observed.input_was == segmap.input_was
segmap.arr[0, 0, 0] = 0.0
segmap.arr[0, 0, 1] = 1.0
assert not np.array_equal(observed.get_arr_int(), segmap.get_arr_int())
def test_Polygon___init__():
    # exterior is list of Keypoint objects
poly = ia.Polygon([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1), ia.Keypoint(x=0.5, y=2.5)])
assert poly.exterior.dtype.type == np.float32
assert np.allclose(
poly.exterior,
np.float32([
[0.0, 0.0],
[1.0, 1.0],
[0.5, 2.5]
])
)
# exterior is list of tuple of floats
poly = ia.Polygon([(0.0, 0.0), (1.0, 1.0), (0.5, 2.5)])
assert poly.exterior.dtype.type == np.float32
assert np.allclose(
poly.exterior,
np.float32([
[0.0, 0.0],
[1.0, 1.0],
[0.5, 2.5]
])
)
# exterior is list of tuple of integer
poly = ia.Polygon([(0, 0), (1, 1), (1, 3)])
assert poly.exterior.dtype.type == np.float32
assert np.allclose(
poly.exterior,
np.float32([
[0.0, 0.0],
[1.0, 1.0],
[1.0, 3.0]
])
)
# exterior is (N,2) ndarray
poly = ia.Polygon(
np.float32([
[0.0, 0.0],
[1.0, 1.0],
[0.5, 2.5]
])
)
assert poly.exterior.dtype.type == np.float32
assert np.allclose(
poly.exterior,
np.float32([
[0.0, 0.0],
[1.0, 1.0],
[0.5, 2.5]
])
)
# exterior is (N,2) ndarray in float64
poly = ia.Polygon(
np.float64([
[0.0, 0.0],
[1.0, 1.0],
[0.5, 2.5]
])
)
assert poly.exterior.dtype.type == np.float32
assert np.allclose(
poly.exterior,
np.float32([
[0.0, 0.0],
[1.0, 1.0],
[0.5, 2.5]
])
)
# arrays without points
poly = ia.Polygon([])
assert poly.exterior.dtype.type == np.float32
assert poly.exterior.shape == (0, 2)
poly = ia.Polygon(np.zeros((0, 2), dtype=np.float32))
assert poly.exterior.dtype.type == np.float32
assert poly.exterior.shape == (0, 2)
# bad array shape
    got_exception = False
    try:
        _ = ia.Polygon(np.zeros((8,), dtype=np.float32))
    except Exception:
        got_exception = True
    assert got_exception
import numpy as np
import minkf as kf
def test_filter_and_smoother():
# case 1: 1d-signal, constant matrices
y = np.ones(3)
x0 = np.array([0.0])
Cest0 = 1 * np.array([[1.0]])
M = np.array([[1.0]])
K = np.array([[1.0]])
Q = np.array([[1.0]])
R = np.array([[1.0]])
res = kf.run_filter(y, x0, Cest0, M, K, Q, R, likelihood=True)
exp_x = [np.array([0.66666]), np.array([0.875]), np.array([0.952381])]
exp_C = [np.array([[0.66666]]), np.array([[0.625]]), np.array([[0.619048]])]
np.testing.assert_allclose(res['x'], exp_x, rtol=1e-4)
np.testing.assert_allclose(res['C'], exp_C, rtol=1e-4)
np.testing.assert_allclose(res['loglike'], 8.74862982742765)
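    # Sanity check on the first two filter steps (scalar KF recursion with
    # M = K = Q = R = 1): predict C_pred = C + 1, gain G = C_pred/(C_pred + 1).
    # Step 1: C_pred = 2, G = 2/3  -> x = 2/3 ~ 0.66666, C = (1/3)*2 ~ 0.66666.
    # Step 2: C_pred = 5/3, G = 5/8 -> x = 2/3 + 5/8*(1 - 2/3) = 0.875, C = 0.625.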
res_smo = kf.run_smoother(y, x0, Cest0, M, K, Q, R)
exp_x_smo = [np.array([0.7619]), np.array([0.90476]), np.array([0.95238])]
exp_C_smo = [np.array([[0.47619]]), np.array([[0.47619]]),
np.array([[0.61905]])]
np.testing.assert_allclose(res_smo['x'], exp_x_smo, rtol=1e-4)
np.testing.assert_allclose(res_smo['C'], exp_C_smo, rtol=1e-4)
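    # The RTS smoother backward pass x_s[k] = x[k] + G_k*(x_s[k+1] - x_pred[k+1]),
    # G_k = C[k]/(C[k] + Q), reproduces these values, e.g.
    # x_s[1] = 0.875 + (0.625/1.625)*(0.952381 - 0.875) ~ 0.90476.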
# case 2: 2d-signal, constant matrices
    y = [np.ones(2), np.ones(2)]
import numpy as np
from merlFunctions import *
from coordinateFunctions import *
from reconstructionFunctions import *
#BRDF observations (5 RGB values)
obs = np.array([[0.09394814, 0.01236500, 0.00221087],
[0.09005638, 0.00315711, 0.00270478],
[1.38033974, 1.21132099, 1.19253075],
[0.97795460, 0.85147798, 0.84648135],
[0.10845871, 0.05911538, 0.05381590]])
#Coordinates for observations (phi_d, theta_h, theta_d)
coords = np.array([[36.2, 1.4, 4.0 ],
[86.5, 76.7, 13.1],
[85.5, 7.6, 78.9],
[144.8, 2.5, 73.8],
[80.4, 12.9, 51.6]])
#Convert to BRDF coordinates
MERLCoords = RusinkToMERL(np.deg2rad(coords))
#Convert to IDs (i.e. rows-ids in the PC matrix)
MERLIds = MERLToID(MERLCoords)
#Load precomputed data
dataDir = "data/"
maskMap = np.load('%s/MaskMap.npy'%dataDir) #Indicating valid regions in MERL BRDFs
median = np.load('%s/Median.npy'%dataDir) #Median, learned from trainingdata
cosMap = np.load('%s/CosineMap.npy'%dataDir) #Precomputed cosine-term for all BRDF locations (ids)
relativeOffset = np.load('%s/RelativeOffset.npy'%dataDir) #Offset, learned from trainingdata
Q = np.load('%s/ScaledEigenvectors.npy'%dataDir) #Scaled eigenvectors (principal components), learned from trainingdata
import numpy as np
import scipy.integrate as integrate
# from numba import jit
# Parameter set:
# p.g = 9.81;
# p.k10 = 30;
# p.alpha = pi/9;
# p.m = 80;
# p.beta0 = 170/180*pi; % resting angle. Cannot be n*pi
# p.l1 = sqrt(1/(2*(1-cos(p.beta0))));
# p.l2 = p.l1;
# p.l0 = sqrt(p.l1^2 + p.l2^2 - 2*p.l1*p.l2*np.cos(p.beta0));
# p.beta10 = acos( (p.l1^2 + p.l2^2 - (0.9*p.l0)^2)/(2*p.l1*p.l2));
# p.c = p.k10*p.m*p.g/p.l0 * (p.l1*p.l2*0.1)/(0.9) * sin(p.beta10) / (p.beta0 - p.beta10);
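# A minimal sketch of the parameter dict this module expects. The key names
# are the ones read by step() below; the numeric values are assumptions
# transcribed from the MATLAB block above (l1 = l2 ~ 0.502 from the formula):
# p = {'gravity': 9.81, 'mass': 80, 'angle_of_attack': np.pi/9,
#      'resting_angle': 170/180*np.pi, 'upper_leg': 0.502, 'lower_leg': 0.502,
#      'stiffness': 30, 'total_energy': 1800}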
def feasible(x, p):
'''
check if state is at all feasible (body/foot underground)
returns a boolean
'''
    if x[5] < 0 or x[1] < 0:  # foot (x[5]) or body (x[1]) below the ground
return False
return True
def p_map(x, p):
'''
Wrapper function for step function, returning only x_next, and -1 if failed
Essentially, the Poincare map.
'''
if type(p) is dict:
if not feasible(x, p):
return x, True # return failed if foot starts underground
sol = step(x, p)
# if len(sol.t_events) < 7:
# # print(len(sol.t_events))
# return sol.y[:, -1], True
return sol.y[:, -1], check_failure(sol.y[:, -1])
elif type(p) is tuple:
vector_of_x = np.zeros(x.shape) # initialize result array
vector_of_fail = np.zeros(x.shape[1])
# TODO: for shorthand, allow just a single tuple to be passed in
# this can be done easily with itertools
for idx, p0 in enumerate(p):
            if not feasible(x[:, idx], p0):
vector_of_x[:, idx] = x[:, idx]
vector_of_fail[idx] = True
else:
sol = step(x[:, idx], p0) # p0 = p[idx]
vector_of_x[:, idx] = sol.y[:, -1]
vector_of_fail[idx] = check_failure(sol.y[:, -1])
return vector_of_x, vector_of_fail
else:
print("WARNING: I got a parameter type that I don't understand.")
return (x, True)
def step(x0, p, prev_sol=None):
'''
Take one step from apex to apex/failure.
returns a sol object from integrate.solve_ivp, with all phases
'''
# * nested functions - scroll down to step code * #
# unpacking constants for faster lookup
AOA = p['angle_of_attack'] #
GRAVITY = p['gravity'] #
MASS = p['mass'] #
RESTING_ANGLE = p['resting_angle']
UPPER_LEG = p['upper_leg']
LOWER_LEG = p['lower_leg']
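    # resting leg length from the two segments via the law of cosines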
RESTING_LENGTH = (np.sqrt(p['upper_leg']**2 + p['lower_leg']**2
- 2*p['upper_leg']*p['lower_leg']*np.cos(p['resting_angle']) ))
STIFFNESS = p['stiffness']
TOTAL_ENERGY = p['total_energy']
# SPECIFIC_STIFFNESS = p['stiffness'] / p['mass']
MAX_TIME = 5
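    # State layout, inferred from flight_dynamics()/feasible() (an assumption):
    # x = [x_body, y_body, vx, vy, x_foot, y_foot, <aux>]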
# @jit(nopython=True)
def flight_dynamics(t, x):
        # ballistic flight: the body accelerates under gravity and the foot
        # moves with the body velocity (leg orientation held fixed)
return np.array([x[2], x[3], 0, -GRAVITY, x[2], x[3], 0])
# @jit(nopython=True)
def stance_dynamics2(t, x):
# stance dynamics
        alpha = np.arctan2(x[1] - x[5], x[0] - x[4])  # leg angle, foot to body
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from collections import OrderedDict
import numpy as np
from astropy.table import Table, vstack
from astropy import units as u
from astropy.io.registry import IORegistryError
from ..utils.scripts import make_path
from ..utils.table import table_standardise_units_copy, table_from_row_data
from ..utils.fitting import Fit
from ..stats.fit_statistics import chi2
from .models import PowerLaw
from .powerlaw import power_law_integral_flux
from . import SpectrumObservationList, SpectrumObservation
__all__ = [
"FluxPoints",
# 'FluxPointProfiles',
"FluxPointEstimator",
"FluxPointFit",
]
log = logging.getLogger(__name__)
REQUIRED_COLUMNS = OrderedDict(
[
("dnde", ["e_ref", "dnde"]),
("e2dnde", ["e_ref", "e2dnde"]),
("flux", ["e_min", "e_max", "flux"]),
("eflux", ["e_min", "e_max", "eflux"]),
]
)
OPTIONAL_COLUMNS = OrderedDict(
[
("dnde", ["dnde_err", "dnde_errp", "dnde_errn", "dnde_ul", "is_ul"]),
("e2dnde", ["e2dnde_err", "e2dnde_errp", "e2dnde_errn", "e2dnde_ul", "is_ul"]),
("flux", ["flux_err", "flux_errp", "flux_errn", "flux_ul", "is_ul"]),
("eflux", ["eflux_err", "eflux_errp", "eflux_errn", "eflux_ul", "is_ul"]),
]
)
DEFAULT_UNIT = OrderedDict(
[
("dnde", u.Unit("cm-2 s-1 TeV-1")),
("e2dnde", u.Unit("erg cm-2 s-1")),
("flux", u.Unit("cm-2 s-1")),
("eflux", u.Unit("erg cm-2 s-1")),
]
)
class FluxPoints(object):
"""Flux points container.
The supported formats are described here: :ref:`gadf:flux-points`
In summary, the following formats and minimum required columns are:
* Format ``dnde``: columns ``e_ref`` and ``dnde``
* Format ``e2dnde``: columns ``e_ref``, ``e2dnde``
* Format ``flux``: columns ``e_min``, ``e_max``, ``flux``
* Format ``eflux``: columns ``e_min``, ``e_max``, ``eflux``
Parameters
----------
table : `~astropy.table.Table`
Table with flux point data
Attributes
----------
table : `~astropy.table.Table`
Table with flux point data
Examples
--------
The `FluxPoints` object is most easily created by reading a file with
flux points given in one of the formats documented above:
.. code:: python
from gammapy.spectrum import FluxPoints
filename = '$GAMMAPY_EXTRA/test_datasets/spectrum/flux_points/flux_points.fits'
flux_points = FluxPoints.read(filename)
flux_points.plot()
An instance of `FluxPoints` can also be created by passing an instance of
`astropy.table.Table`, which contains the required columns, such as `'e_ref'`
and `'dnde'`:
.. code:: python
from astropy import units as u
from astropy.table import Table
from gammapy.spectrum import FluxPoints
from gammapy.spectrum.models import PowerLaw
table = Table()
pwl = PowerLaw()
e_ref = np.logspace(0, 2, 7) * u.TeV
table['e_ref'] = e_ref
table['dnde'] = pwl(e_ref)
table.meta['SED_TYPE'] = 'dnde'
flux_points = FluxPoints(table)
flux_points.plot()
If you have flux points in a different data format, the format can be changed
    by renaming the table columns and adding metadata:
.. code:: python
from astropy import units as u
from astropy.table import Table
from gammapy.spectrum import FluxPoints
table = Table.read('$GAMMAPY_EXTRA/test_datasets/spectrum/flux_points/flux_points_ctb_37b.txt',
format='ascii.csv', delimiter=' ', comment='#')
table.meta['SED_TYPE'] = 'dnde'
table.rename_column('Differential_Flux', 'dnde')
table['dnde'].unit = 'cm-2 s-1 TeV-1'
table.rename_column('lower_error', 'dnde_errn')
table['dnde_errn'].unit = 'cm-2 s-1 TeV-1'
table.rename_column('upper_error', 'dnde_errp')
table['dnde_errp'].unit = 'cm-2 s-1 TeV-1'
table.rename_column('E', 'e_ref')
table['e_ref'].unit = 'TeV'
flux_points = FluxPoints(table)
flux_points.plot()
"""
def __init__(self, table):
self.table = table_standardise_units_copy(table)
# validate that the table is a valid representation
# of the given flux point sed type
self._validate_table(self.table)
def __repr__(self):
fmt = '{}(sed_type="{}", n_points={})'
return fmt.format(self.__class__.__name__, self.sed_type, len(self.table))
@classmethod
def read(cls, filename, **kwargs):
"""Read flux points.
Parameters
----------
filename : str
Filename
kwargs : dict
Keyword arguments passed to `astropy.table.Table.read`.
"""
filename = make_path(filename)
try:
table = Table.read(str(filename), **kwargs)
except IORegistryError:
kwargs.setdefault("format", "ascii.ecsv")
table = Table.read(str(filename), **kwargs)
if "SED_TYPE" not in table.meta.keys():
sed_type = cls._guess_sed_type(table)
table.meta["SED_TYPE"] = sed_type
return cls(table=table)
def write(self, filename, **kwargs):
"""Write flux points.
Parameters
----------
filename : str
Filename
kwargs : dict
Keyword arguments passed to `astropy.table.Table.write`.
"""
filename = make_path(filename)
try:
self.table.write(str(filename), **kwargs)
except IORegistryError:
kwargs.setdefault("format", "ascii.ecsv")
self.table.write(str(filename), **kwargs)
@classmethod
def stack(cls, flux_points):
"""Create flux points by stacking list of flux points.
The first `FluxPoints` object in the list is taken as a reference to infer
column names and units for the stacked object.
Parameters
----------
flux_points : list of `FluxPoints`
List of flux points to stack.
Returns
-------
flux_points : `FluxPoints`
Flux points without upper limit points.
"""
reference = flux_points[0].table
tables = []
        for fp in flux_points:
            table = fp.table
for colname in reference.colnames:
column = reference[colname]
if column.unit:
table[colname] = table[colname].quantity.to(column.unit)
tables.append(table[reference.colnames])
table_stacked = vstack(tables)
table_stacked.meta["SED_TYPE"] = reference.meta["SED_TYPE"]
return cls(table_stacked)
def drop_ul(self):
"""Drop upper limit flux points.
Returns
-------
flux_points : `FluxPoints`
Flux points with upper limit points removed.
Examples
--------
>>> from gammapy.spectrum import FluxPoints
>>> filename = '$GAMMAPY_EXTRA/test_datasets/spectrum/flux_points/flux_points.fits'
>>> flux_points = FluxPoints.read(filename)
>>> print(flux_points)
FluxPoints(sed_type="flux", n_points=24)
>>> print(flux_points.drop_ul())
FluxPoints(sed_type="flux", n_points=19)
"""
table_drop_ul = self.table[~self.is_ul]
return self.__class__(table_drop_ul)
def to_sed_type(self, sed_type, method="log_center", model=None, pwl_approx=False):
"""Convert to a different SED type (return new `FluxPoints`).
See: http://adsabs.harvard.edu/abs/1995NIMPA.355..541L for details
on the `'lafferty'` method.
Parameters
----------
sed_type : {'dnde'}
SED type to convert to.
model : `~gammapy.spectrum.models.SpectralModel`
Spectral model assumption. Note that the value of the amplitude parameter
does not matter. Still it is recommended to use something with the right
scale and units. E.g. `amplitude = 1e-12 * u.Unit('cm-2 s-1 TeV-1')`
method : {'lafferty', 'log_center', 'table'}
Flux points `e_ref` estimation method:
            * `'lafferty'` Lafferty & Wyatt model-based e_ref
* `'log_center'` log bin center e_ref
* `'table'` using column 'e_ref' from input flux_points
pwl_approx : bool
            Use local power law approximation at e_ref to compute differential flux
from the integral flux. This method is used by the Fermi-LAT catalogs.
Returns
-------
flux_points : `FluxPoints`
Flux points including differential quantity columns `dnde`
and `dnde_err` (optional), `dnde_ul` (optional).
Examples
--------
>>> from astropy import units as u
>>> from gammapy.spectrum import FluxPoints
>>> from gammapy.spectrum.models import PowerLaw
>>> filename = '$GAMMAPY_EXTRA/test_datasets/spectrum/flux_points/flux_points.fits'
>>> flux_points = FluxPoints.read(filename)
>>> model = PowerLaw(2.2 * u.Unit(''), 1e-12 * u.Unit('cm-2 s-1 TeV-1'), 1 * u.TeV)
>>> flux_points_dnde = flux_points.to_sed_type('dnde', model=model)
"""
# TODO: implement other directions. Refactor!
if sed_type != "dnde":
raise NotImplementedError
if model is None:
model = PowerLaw(
index=2 * u.Unit(""),
amplitude=1 * u.Unit("cm-2 s-1 TeV-1"),
reference=1 * u.TeV,
)
input_table = self.table.copy()
e_min, e_max = self.e_min, self.e_max
# Compute e_ref
if method == "table":
e_ref = input_table["e_ref"].quantity
elif method == "log_center":
            e_ref = np.sqrt(e_min * e_max)  # geometric mean, i.e. the log-bin center
import math
import numpy as np
import tensorflow as tf
import utils as ut
from matplotlib.patches import Ellipse
BATCHSIZE = 10000
K = 30
MAX_ITER = 800
LR = 0.1
COLOR_LIST = ['r', 'g', 'b', 'y', 'm', 'k']
IS_VALID = False
# data = np.load('data/data2D.npy')
data = np.load('data/data100D.npy')  # 100-D dataset (2-D variant commented out above)
import numpy as np
from PIL import Image, ImageDraw
from .colors import *
def colorize_class_preds(class_maps, no_classes):
# class maps are level-batch-class-H-W
np_arrays = []
for lvl in class_maps:
lvl = map_color_values(lvl, no_classes, True)
np_arrays.append(lvl)
return np_arrays
def normalize_centerness(center_maps):
p_min = 1E6
p_max = -1E6
for lvl in center_maps:
p_min = np.min([p_min, np.min(lvl)])
p_max = np.max([p_max, np.max(lvl)])
normed_imgs = []
for lvl in center_maps:
lvl = (lvl - p_min) / (p_max - p_min) * 255
normed_imgs.append(lvl)
return normed_imgs
def image_pyramid(pred_maps, target_size):
"""Turns as series of images to a column of target_size images."""
resized_imgs = []
for lvl in pred_maps:
lvl = lvl.astype(np.uint8)
lvl_img = Image.fromarray(lvl)
lvl_img = lvl_img.resize(target_size[::-1])
lvl_img = np.array(lvl_img)
resized_imgs.append(lvl_img)
resized_imgs.append(np.full((10,) + lvl_img.shape[1:], 255))
img_cat = np.concatenate(resized_imgs)
return img_cat.astype(np.uint8)
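# Usage sketch (hypothetical inputs): colorize per-level class predictions,
# then stack them into one tall image for logging. Assumes 21 classes and a
# (height, width) target of (128, 256):
# colored = colorize_class_preds(level_class_maps, no_classes=21)
# pyramid = image_pyramid(colored, target_size=(128, 256))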
def get_present_classes(classes_vis):
"""Finds all classes that exist in a given picture."""
unique_vals = []
for vis in classes_vis:
if isinstance(vis, np.ndarray):
unique_vals.extend(np.unique(vis))
else:
unique_vals.extend(np.unique(vis.cpu().numpy()))
ret = set(unique_vals)
try:
ret.remove(-1)
except KeyError:
pass
ret = list(ret)
ret.sort()
return ret
def stitch_big_image(images_list):
"""Stitches separate np.ndarray images into a single large array."""
if isinstance(images_list[0], np.ndarray):
# stitch vertically
# stack to 3 channels if necessary
max_len = 0
for ind, ele in enumerate(images_list):
if ele.shape[-1] == 1:
images_list[ind] = np.concatenate([ele, ele, ele], -1)
if ele.shape[1] > max_len:
max_len = ele.shape[1]
for ind, ele in enumerate(images_list):
if ele.shape[1] < max_len:
pad_ele = np.zeros(
(ele.shape[0], max_len-ele.shape[1], 3), np.uint8
)
                images_list[ind] = np.concatenate([pad_ele, images_list[ind]], 1)
return np.concatenate(images_list, 0)
else:
# stitch horizontally
stich_list = [stitch_big_image(im) for im in images_list]
return np.concatenate(stich_list, 1)
def add_class_legend(img, classes, present_classes):
"""Adds the class legend to the side of an image."""
max_len = max([len(x) for x in classes])
no_cl = len(classes)
spacer = 20
    canv = np.ones((img.shape[0], 25 + max_len * 7, 3))
import os
from sklearn.decomposition import PCA
import skimage
import menpo
from skimage import io as sio
from skimage import transform
import numpy as np
import glob
from menpo import io as mio
from .utils import is_landmark_file, is_image_file, LMK_EXTENSIONS, \
IMG_EXTENSIONS
from matplotlib import pyplot as plt
from menpo.image import Image
from menpo.landmark import LandmarkManager
from menpo.shape import PointCloud
from tqdm import tqdm
class SingleImage(object):
"""
Holds Single Image
"""
def __init__(self, img, lmk, **kwargs):
"""
Parameters
----------
img: np.ndarray
actual image pixels
lmk: np.ndarray
landmarks
kwargs: dict
additional kwargs like file paths
"""
self.img = img
self.lmk = lmk
for key, val in kwargs.items():
setattr(self, key, val)
@classmethod
def from_files(cls, file):
"""
Create class from image or landmark file
Parameters
----------
file: string
path to image or landmarkfile
Returns
-------
class instance
"""
is_img_file = is_image_file(file)
is_lmk_file = is_landmark_file(file)
img, lmk = None, None
if is_img_file:
img = sio.imread(file)
            img_file = file
lmk_file = None
for ext in LMK_EXTENSIONS:
curr_ext = "." + file.rsplit(".", maxsplit=1)[-1]
_lmk_file = file.replace(curr_ext, ext)
if os.path.isfile(_lmk_file):
lmk = np.loadtxt(_lmk_file)
lmk_file = _lmk_file
elif is_lmk_file:
            lmk = np.loadtxt(file)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sps
import pandas as pd
from mars import dataframe as md
from mars import tensor as mt
from mars.core import get_tiled
from mars.tensor.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \
expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \
hsplit, vsplit, dsplit, roll, squeeze, diff, ediff1d, flip, flipud, fliplr, repeat, tile, \
isin, searchsorted, unique, sort, argsort, partition, argpartition, topk, argtopk, \
trapz, shape, to_gpu, to_cpu, swapaxes
from mars.tensor.datasource import tensor, ones, zeros, arange
from mars.tests.core import require_cupy, TestBase
class Test(TestBase):
def setUp(self):
self.ctx, self.executor = self._create_test_context()
def testRechunkExecution(self):
raw = np.random.RandomState(0).random((11, 8))
arr = tensor(raw, chunk_size=3)
arr2 = arr.rechunk(4)
res = self.executor.execute_tensor(arr2)
self.assertTrue(np.array_equal(res[0], raw[:4, :4]))
self.assertTrue(np.array_equal(res[1], raw[:4, 4:]))
self.assertTrue(np.array_equal(res[2], raw[4:8, :4]))
self.assertTrue(np.array_equal(res[3], raw[4:8, 4:]))
self.assertTrue(np.array_equal(res[4], raw[8:, :4]))
self.assertTrue(np.array_equal(res[5], raw[8:, 4:]))
def testCopytoExecution(self):
a = ones((2, 3), chunk_size=1)
b = tensor([3, -1, 3], chunk_size=2)
copyto(a, b, where=b > 1)
res = self.executor.execute_tensor(a, concat=True)[0]
expected = np.array([[3, 1, 3], [3, 1, 3]])
np.testing.assert_equal(res, expected)
a = ones((2, 3), chunk_size=1)
b = tensor(np.asfortranarray(np.random.rand(2, 3)), chunk_size=2)
copyto(b, a)
res = self.executor.execute_tensor(b, concat=True)[0]
expected = np.asfortranarray(np.ones((2, 3)))
np.testing.assert_array_equal(res, expected)
self.assertTrue(res.flags['F_CONTIGUOUS'])
self.assertFalse(res.flags['C_CONTIGUOUS'])
def testAstypeExecution(self):
raw = np.random.random((10, 5))
arr = tensor(raw, chunk_size=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw.astype('i8'))
raw = sps.random(10, 5, density=.2)
arr = tensor(raw, chunk_size=3)
arr2 = arr.astype('i8')
res = self.executor.execute_tensor(arr2, concat=True)
self.assertTrue(np.array_equal(res[0].toarray(), raw.astype('i8').toarray()))
raw = np.asfortranarray(np.random.random((10, 5)))
arr = tensor(raw, chunk_size=3)
arr2 = arr.astype('i8', order='C')
res = self.executor.execute_tensor(arr2, concat=True)[0]
np.testing.assert_array_equal(res, raw.astype('i8'))
self.assertTrue(res.flags['C_CONTIGUOUS'])
self.assertFalse(res.flags['F_CONTIGUOUS'])
def testTransposeExecution(self):
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunk_size=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw.T)
arr3 = transpose(arr, axes=(-2, -1, -3))
res = self.executor.execute_tensor(arr3, concat=True)
np.testing.assert_array_equal(res[0], raw.transpose(1, 2, 0))
raw = sps.random(11, 8)
arr = tensor(raw, chunk_size=3)
arr2 = transpose(arr)
self.assertTrue(arr2.issparse())
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0].toarray(), raw.T.toarray())
# test order
raw = np.asfortranarray(np.random.random((11, 8, 5)))
arr = tensor(raw, chunk_size=3)
arr2 = transpose(arr)
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = np.transpose(raw).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
arr = tensor(raw, chunk_size=3)
arr2 = transpose(arr, (1, 2, 0))
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = np.transpose(raw, (1, 2, 0)).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
df = md.DataFrame(mt.random.rand(10, 5, chunk_size=5))
df = df[df[0] < 1]
# generate tensor with unknown shape
t = df.to_tensor()
t2 = transpose(t)
res = self.executor.execute_tensor(t2, concat=True)[0]
self.assertEqual(res.shape, (5, 10))
def testSwapaxesExecution(self):
raw = np.random.random((11, 8, 5))
arr = swapaxes(raw, 2, 0)
res = self.executor.execute_tensor(arr, concat=True)
np.testing.assert_array_equal(res[0], raw.swapaxes(2, 0))
raw = np.random.random((11, 8, 5))
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0], raw.swapaxes(2, 0))
raw = sps.random(11, 8, density=.2)
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)
np.testing.assert_array_equal(res[0].toarray(), raw.toarray().swapaxes(1, 0))
# test order
raw = np.asfortranarray(np.random.rand(11, 8, 5))
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(2, 0)
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = raw.swapaxes(2, 0).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(0, 2)
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = raw.swapaxes(0, 2).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
arr = tensor(raw, chunk_size=3)
arr2 = arr.swapaxes(1, 0)
res = self.executor.execute_tensor(arr2, concat=True)[0]
expected = raw.swapaxes(1, 0).copy(order='A')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
def testMoveaxisExecution(self):
x = zeros((3, 4, 5), chunk_size=2)
t = moveaxis(x, 0, -1)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (4, 5, 3))
t = moveaxis(x, -1, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 3, 4))
t = moveaxis(x, [0, 1], [-1, -2])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
t = moveaxis(x, [0, 1, 2], [-1, -2, -3])
res = self.executor.execute_tensor(t, concat=True)[0]
self.assertEqual(res.shape, (5, 4, 3))
def testBroadcastToExecution(self):
raw = np.random.random((10, 5, 1))
arr = tensor(raw, chunk_size=2)
arr2 = broadcast_to(arr, (5, 10, 5, 6))
res = self.executor.execute_tensor(arr2, concat=True)[0]
np.testing.assert_array_equal(res, np.broadcast_to(raw, (5, 10, 5, 6)))
# test chunk with unknown shape
arr1 = mt.random.rand(3, 4, chunk_size=2)
arr2 = mt.random.permutation(arr1)
arr3 = broadcast_to(arr2, (2, 3, 4))
res = self.executor.execute_tensor(arr3, concat=True)[0]
self.assertEqual(res.shape, (2, 3, 4))
def testBroadcastArraysExecutions(self):
x_data = [[1, 2, 3]]
x = tensor(x_data, chunk_size=1)
y_data = [[1], [2], [3]]
y = tensor(y_data, chunk_size=2)
a = broadcast_arrays(x, y)
res = [self.executor.execute_tensor(arr, concat=True)[0] for arr in a]
expected = np.broadcast_arrays(x_data, y_data)
for r, e in zip(res, expected):
np.testing.assert_equal(r, e)
def testWhereExecution(self):
raw_cond = np.random.randint(0, 2, size=(4, 4), dtype='?')
raw_x = np.random.rand(4, 1)
raw_y = np.random.rand(4, 4)
cond, x, y = tensor(raw_cond, chunk_size=2), tensor(raw_x, chunk_size=2), tensor(raw_y, chunk_size=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)
self.assertTrue(np.array_equal(res[0], np.where(raw_cond, raw_x, raw_y)))
raw_cond = sps.csr_matrix(np.random.randint(0, 2, size=(4, 4), dtype='?'))
raw_x = sps.random(4, 1, density=.1)
raw_y = sps.random(4, 4, density=.1)
cond, x, y = tensor(raw_cond, chunk_size=2), tensor(raw_x, chunk_size=2), tensor(raw_y, chunk_size=2)
arr = where(cond, x, y)
res = self.executor.execute_tensor(arr, concat=True)[0]
self.assertTrue(np.array_equal(res.toarray(),
np.where(raw_cond.toarray(), raw_x.toarray(), raw_y.toarray())))
# GH 2009
raw_x = np.arange(9.).reshape(3, 3)
x = arange(9.).reshape(3, 3)
arr = where(x < 5, 2, -1)
res = self.executor.execute_tensor(arr, concat=True)[0]
np.testing.assert_array_equal(res, np.where(raw_x < 5, 2, -1))
def testReshapeExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunk_size=6)
y = x.reshape(-1, 30)
res = self.executor.execute_tensor(y, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(-1, 30))
y2 = x.reshape(10, -1)
res = self.executor.execute_tensor(y2, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(10, -1))
y3 = x.reshape(-1)
res = self.executor.execute_tensor(y3, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(-1))
y4 = x.ravel()
res = self.executor.execute_tensor(y4, concat=True)
np.testing.assert_array_equal(res[0], raw_data.ravel())
raw_data = np.random.rand(30, 100, 20)
x = tensor(raw_data, chunk_size=6)
y = x.reshape(-1, 20, 5, 5, 4)
res = self.executor.execute_tensor(y, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(-1, 20, 5, 5, 4))
y2 = x.reshape(3000, 10, 2)
res = self.executor.execute_tensor(y2, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(3000, 10, 2))
y3 = x.reshape(60, 25, 40)
res = self.executor.execute_tensor(y3, concat=True)
np.testing.assert_array_equal(res[0], raw_data.reshape(60, 25, 40))
y4 = x.reshape(60, 25, 40)
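        # force the shuffle-based reshape implementation (internal flag) and
        # check the mocked size estimation against the real output size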
y4.op.extra_params['_reshape_with_shuffle'] = True
size_res = self.executor.execute_tensor(y4, mock=True)
res = self.executor.execute_tensor(y4, concat=True)
self.assertEqual(res[0].nbytes, sum(v[0] for v in size_res))
self.assertTrue(np.array_equal(res[0], raw_data.reshape(60, 25, 40)))
y5 = x.ravel(order='F')
res = self.executor.execute_tensor(y5, concat=True)[0]
expected = raw_data.ravel(order='F')
np.testing.assert_array_equal(res, expected)
self.assertEqual(res.flags['C_CONTIGUOUS'], expected.flags['C_CONTIGUOUS'])
self.assertEqual(res.flags['F_CONTIGUOUS'], expected.flags['F_CONTIGUOUS'])
def testExpandDimsExecution(self):
raw_data = np.random.rand(10, 20, 30)
x = tensor(raw_data, chunk_size=6)
y = expand_dims(x, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 1)))
y = expand_dims(x, 0)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 0)))
y = expand_dims(x, 3)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 3)))
y = expand_dims(x, -1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -1)))
y = expand_dims(x, -4)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -4)))
with self.assertRaises(np.AxisError):
expand_dims(x, -5)
with self.assertRaises(np.AxisError):
expand_dims(x, 4)
def testRollAxisExecution(self):
x = ones((3, 4, 5, 6), chunk_size=1)
y = rollaxis(x, 3, 1)
res = self.executor.execute_tensor(y, concat=True)
self.assertTrue(np.array_equal(res[0], np.rollaxis(np.ones((3, 4, 5, 6)), 3, 1)))
def testAtleast1dExecution(self):
x = 1
y = ones(3, chunk_size=2)
z = ones((3, 4), chunk_size=2)
t = atleast_1d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([1])))
self.assertTrue(np.array_equal(res[1], np.ones(3)))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast2dExecution(self):
x = 1
y = ones(3, chunk_size=2)
z = ones((3, 4), chunk_size=2)
t = atleast_2d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.array([[1]])))
self.assertTrue(np.array_equal(res[1], np.atleast_2d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.ones((3, 4))))
def testAtleast3dExecution(self):
x = 1
y = ones(3, chunk_size=2)
z = ones((3, 4), chunk_size=2)
t = atleast_3d(x, y, z)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in t]
self.assertTrue(np.array_equal(res[0], np.atleast_3d(x)))
self.assertTrue(np.array_equal(res[1], np.atleast_3d(np.ones(3))))
self.assertTrue(np.array_equal(res[2], np.atleast_3d(np.ones((3, 4)))))
def testArgwhereExecution(self):
x = arange(6, chunk_size=2).reshape(2, 3)
t = argwhere(x > 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.argwhere(np.arange(6).reshape(2, 3) > 1)
np.testing.assert_array_equal(res, expected)
data = np.asfortranarray(np.random.rand(10, 20))
x = tensor(data, chunk_size=10)
t = argwhere(x > 0.5)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.argwhere(data > 0.5)
np.testing.assert_array_equal(res, expected)
self.assertTrue(res.flags['F_CONTIGUOUS'])
self.assertFalse(res.flags['C_CONTIGUOUS'])
def testArraySplitExecution(self):
x = arange(48, chunk_size=3).reshape(2, 3, 8)
ss = array_split(x, 3, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), 3, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = array_split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.array_split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
def testSplitExecution(self):
for a in ((1, 1, 1, 2, 2, 3), [1, 1, 1, 2, 2, 3]):
splits = split(a, (3, 5))
self.assertEqual(len(splits), 3)
splits0 = self.executor.execute_tensor(splits[0], concat=True)[0]
np.testing.assert_array_equal(splits0, (1, 1, 1))
splits1 = self.executor.execute_tensor(splits[1], concat=True)[0]
np.testing.assert_array_equal(splits1, (2, 2))
splits2 = self.executor.execute_tensor(splits[2], concat=True)[0]
np.testing.assert_array_equal(splits2, (3,))
x = arange(48, chunk_size=3).reshape(2, 3, 8)
ss = split(x, 4, axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), 4, axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
ss = split(x, [3, 5, 6, 10], axis=2)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# hsplit
x = arange(120, chunk_size=3).reshape(2, 12, 5)
ss = hsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.hsplit(np.arange(120).reshape(2, 12, 5), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# vsplit
x = arange(48, chunk_size=3).reshape(8, 3, 2)
ss = vsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.vsplit(np.arange(48).reshape(8, 3, 2), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
# dsplit
x = arange(48, chunk_size=3).reshape(2, 3, 8)
ss = dsplit(x, 4)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.dsplit(np.arange(48).reshape(2, 3, 8), 4)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r, e) for r, e in zip(res, expected)]
x_data = sps.random(12, 8, density=.1)
x = tensor(x_data, chunk_size=3)
ss = split(x, 4, axis=0)
res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss]
expected = np.split(x_data.toarray(), 4, axis=0)
self.assertEqual(len(res), len(expected))
[np.testing.assert_equal(r.toarray(), e) for r, e in zip(res, expected)]
def testRollExecution(self):
x = arange(10, chunk_size=2)
t = roll(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10), 2)
np.testing.assert_equal(res, expected)
x2 = x.reshape(2, 5)
t = roll(x2, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=0)
np.testing.assert_equal(res, expected)
t = roll(x2, 1, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=1)
np.testing.assert_equal(res, expected)
def testSqueezeExecution(self):
data = np.array([[[0], [1], [2]]])
x = tensor(data, chunk_size=1)
t = squeeze(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data)
np.testing.assert_equal(res, expected)
t = squeeze(x, axis=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.squeeze(data, axis=2)
np.testing.assert_equal(res, expected)
def testDiffExecution(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunk_size=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, n=2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, n=2)
np.testing.assert_equal(res, expected)
data = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
x = tensor(data, chunk_size=2)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data)
np.testing.assert_equal(res, expected)
t = diff(x, axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(data, axis=0)
np.testing.assert_equal(res, expected)
x = mt.arange('1066-10-13', '1066-10-16', dtype=mt.datetime64)
t = diff(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.diff(np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64))
np.testing.assert_equal(res, expected)
def testEdiff1d(self):
data = np.array([1, 2, 4, 7, 0])
x = tensor(data, chunk_size=2)
t = ediff1d(x)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
to_begin = tensor(-99, chunk_size=2)
to_end = tensor([88, 99], chunk_size=2)
t = ediff1d(x, to_begin=to_begin, to_end=to_end)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data, to_begin=-99, to_end=np.array([88, 99]))
np.testing.assert_equal(res, expected)
data = [[1, 2, 4], [1, 6, 24]]
t = ediff1d(tensor(data, chunk_size=2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.ediff1d(data)
np.testing.assert_equal(res, expected)
def testFlipExecution(self):
a = arange(8, chunk_size=2).reshape((2, 2, 2))
t = flip(a, 0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 0)
np.testing.assert_equal(res, expected)
t = flip(a, 1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flip(np.arange(8).reshape(2, 2, 2), 1)
np.testing.assert_equal(res, expected)
t = flipud(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.flipud(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
t = fliplr(a)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.fliplr(np.arange(8).reshape(2, 2, 2))
np.testing.assert_equal(res, expected)
def testRepeatExecution(self):
a = repeat(3, 4)
res = self.executor.execute_tensor(a)[0]
expected = np.repeat(3, 4)
np.testing.assert_equal(res, expected)
x_data = np.random.randn(20, 30)
x = tensor(x_data, chunk_size=(3, 4))
t = repeat(x, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 2)
np.testing.assert_equal(res, expected)
t = repeat(x, 3, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, 3, axis=1)
np.testing.assert_equal(res, expected)
t = repeat(x, np.arange(20), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
t = repeat(x, arange(20, chunk_size=5), axis=0)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data, np.arange(20), axis=0)
np.testing.assert_equal(res, expected)
x_data = sps.random(20, 30, density=.1)
x = tensor(x_data, chunk_size=(3, 4))
t = repeat(x, 2, axis=1)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.repeat(x_data.toarray(), 2, axis=1)
np.testing.assert_equal(res.toarray(), expected)
def testTileExecution(self):
a_data = np.array([0, 1, 2])
a = tensor(a_data, chunk_size=2)
t = tile(a, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, 2)
np.testing.assert_equal(res, expected)
t = tile(a, (2, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 2))
np.testing.assert_equal(res, expected)
t = tile(a, (2, 1, 2))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(a_data, (2, 1, 2))
np.testing.assert_equal(res, expected)
b_data = np.array([[1, 2], [3, 4]])
b = tensor(b_data, chunk_size=1)
t = tile(b, 2)
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, 2)
np.testing.assert_equal(res, expected)
t = tile(b, (2, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(b_data, (2, 1))
np.testing.assert_equal(res, expected)
c_data = np.array([1, 2, 3, 4])
c = tensor(c_data, chunk_size=3)
t = tile(c, (4, 1))
res = self.executor.execute_tensor(t, concat=True)[0]
expected = np.tile(c_data, (4, 1))
np.testing.assert_equal(res, expected)
def testIsInExecution(self):
element = 2 * arange(4, chunk_size=1).reshape((2, 2))
test_elements = [1, 2, 4, 8]
mask = isin(element, test_elements)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([2, 4])
np.testing.assert_equal(res, expected)
mask = isin(element, test_elements, invert=True)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements, invert=True)
np.testing.assert_equal(res, expected)
res = self.executor.execute_tensor(element[mask], concat=True)[0]
expected = np.array([0, 6])
np.testing.assert_equal(res, expected)
test_set = {1, 2, 4, 8}
mask = isin(element, test_set)
res = self.executor.execute_tensor(mask, concat=True)[0]
expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_set)
np.testing.assert_equal(res, expected)
def testRavelExecution(self):
arr = ones((10, 5), chunk_size=2)
flat_arr = mt.ravel(arr)
res = self.executor.execute_tensor(flat_arr, concat=True)[0]
self.assertEqual(len(res), 50)
np.testing.assert_equal(res, np.ones(50))
def testSearchsortedExecution(self):
raw = np.sort(np.random.randint(100, size=(16,)))
# test different chunk_size, 3 will have combine, 6 will skip combine
for chunk_size in (3, 6):
arr = tensor(raw, chunk_size=chunk_size)
# test scalar, with value in the middle
t1 = searchsorted(arr, 20)
res = self.executor.execute_tensor(t1, concat=True)[0]
expected = np.searchsorted(raw, 20)
np.testing.assert_array_equal(res, expected)
# test scalar, with value larger than 100
t2 = searchsorted(arr, 200)
res = self.executor.execute_tensor(t2, concat=True)[0]
expected = np.searchsorted(raw, 200)
np.testing.assert_array_equal(res, expected)
# test scalar, side left, with value exact in the middle of the array
t3 = searchsorted(arr, raw[10], side='left')
res = self.executor.execute_tensor(t3, concat=True)[0]
expected = np.searchsorted(raw, raw[10], side='left')
np.testing.assert_array_equal(res, expected)
# test scalar, side right, with value exact in the middle of the array
t4 = searchsorted(arr, raw[10], side='right')
res = self.executor.execute_tensor(t4, concat=True)[0]
expected = np.searchsorted(raw, raw[10], side='right')
np.testing.assert_array_equal(res, expected)
# test scalar, side left, with value exact in the end of the array
t5 = searchsorted(arr, raw[15], side='left')
res = self.executor.execute_tensor(t5, concat=True)[0]
expected = np.searchsorted(raw, raw[15], side='left')
np.testing.assert_array_equal(res, expected)
# test scalar, side right, with value exact in the end of the array
t6 = searchsorted(arr, raw[15], side='right')
res = self.executor.execute_tensor(t6, concat=True)[0]
expected = np.searchsorted(raw, raw[15], side='right')
np.testing.assert_array_equal(res, expected)
# test scalar, side left, with value exact in the start of the array
t7 = searchsorted(arr, raw[0], side='left')
res = self.executor.execute_tensor(t7, concat=True)[0]
expected = np.searchsorted(raw, raw[0], side='left')
np.testing.assert_array_equal(res, expected)
# test scalar, side right, with value exact in the start of the array
t8 = searchsorted(arr, raw[0], side='right')
res = self.executor.execute_tensor(t8, concat=True)[0]
expected = np.searchsorted(raw, raw[0], side='right')
np.testing.assert_array_equal(res, expected)
raw2 = np.random.randint(100, size=(3, 4))
# test tensor, side left
t9 = searchsorted(arr, tensor(raw2, chunk_size=2), side='left')
res = self.executor.execute_tensor(t9, concat=True)[0]
expected = np.searchsorted(raw, raw2, side='left')
np.testing.assert_array_equal(res, expected)
# test tensor, side right
t10 = searchsorted(arr, tensor(raw2, chunk_size=2), side='right')
res = self.executor.execute_tensor(t10, concat=True)[0]
expected = np.searchsorted(raw, raw2, side='right')
np.testing.assert_array_equal(res, expected)
# test one chunk
arr = tensor(raw, chunk_size=16)
# test scalar, tensor to search has 1 chunk
t11 = searchsorted(arr, 20)
res = self.executor.execute_tensor(t11, concat=True)[0]
expected = np.searchsorted(raw, 20)
np.testing.assert_array_equal(res, expected)
# test tensor with 1 chunk, tensor to search has 1 chunk
t12 = searchsorted(arr, tensor(raw2, chunk_size=4))
res = self.executor.execute_tensor(t12, concat=True)[0]
expected = np.searchsorted(raw, raw2)
np.testing.assert_array_equal(res, expected)
# test tensor with more than 1 chunk, tensor to search has 1 chunk
t13 = searchsorted(arr, tensor(raw2, chunk_size=2))
res = self.executor.execute_tensor(t13, concat=True)[0]
expected = np.searchsorted(raw, raw2)
np.testing.assert_array_equal(res, expected)
# test sorter
raw3 = np.random.randint(100, size=(16,))
arr = tensor(raw3, chunk_size=3)
order = np.argsort(raw3)
order_arr = tensor(order, chunk_size=4)
t14 = searchsorted(arr, 20, sorter=order_arr)
res = self.executor.execute_tensor(t14, concat=True)[0]
expected = np.searchsorted(raw3, 20, sorter=order)
np.testing.assert_array_equal(res, expected)
def testUniqueExecution(self):
rs = np.random.RandomState(0)
raw = rs.randint(10, size=(10,))
for chunk_size in (10, 3):
x = tensor(raw, chunk_size=chunk_size)
y = unique(x)
res = self.executor.execute_tensor(y, concat=True)[0]
expected = np.unique(raw)
np.testing.assert_array_equal(res, expected)
y, indices = unique(x, return_index=True)
res = self.executor.execute_tensors([y, indices])
expected = np.unique(raw, return_index=True)
self.assertEqual(len(res), 2)
self.assertEqual(len(expected), 2)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
y, inverse = unique(x, return_inverse=True)
res = self.executor.execute_tensors([y, inverse])
expected = np.unique(raw, return_inverse=True)
self.assertEqual(len(res), 2)
self.assertEqual(len(expected), 2)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
y, counts = unique(x, return_counts=True)
res = self.executor.execute_tensors([y, counts])
expected = np.unique(raw, return_counts=True)
self.assertEqual(len(res), 2)
self.assertEqual(len(expected), 2)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
y, indices, inverse, counts = unique(x, return_index=True,
return_inverse=True, return_counts=True)
res = self.executor.execute_tensors([y, indices, inverse, counts])
expected = np.unique(raw, return_index=True,
return_inverse=True, return_counts=True)
self.assertEqual(len(res), 4)
self.assertEqual(len(expected), 4)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
np.testing.assert_array_equal(res[2], expected[2])
np.testing.assert_array_equal(res[3], expected[3])
y, indices, counts = unique(x, return_index=True, return_counts=True)
res = self.executor.execute_tensors([y, indices, counts])
expected = np.unique(raw, return_index=True, return_counts=True)
self.assertEqual(len(res), 3)
self.assertEqual(len(expected), 3)
np.testing.assert_array_equal(res[0], expected[0])
np.testing.assert_array_equal(res[1], expected[1])
np.testing.assert_array_equal(res[2], expected[2])
raw2 = rs.randint(10, size=(4, 5, 6))
x2 = tensor(raw2, chunk_size=chunk_size)
y2 = unique(x2)
res = self.executor.execute_tensor(y2, concat=True)[0]
expected = np.unique(raw2)
np.testing.assert_array_equal(res, expected)
y2 = unique(x2, axis=1)
res = self.executor.execute_tensor(y2, concat=True)[0]
expected = np.unique(raw2, axis=1)
np.testing.assert_array_equal(res, expected)
y2 = unique(x2, axis=2)
res = self.executor.execute_tensor(y2, concat=True)[0]
expected = np.unique(raw2, axis=2)
np.testing.assert_array_equal(res, expected)
raw = rs.randint(10, size=(10, 20))
raw[:, 0] = raw[:, 11] = rs.randint(10, size=(10,))
x = tensor(raw, chunk_size=2)
y, ind, inv, counts = unique(x, aggregate_size=3, axis=1, return_index=True,
return_inverse=True, return_counts=True)
res_unique, res_ind, res_inv, res_counts = self.executor.execute_tensors((y, ind, inv, counts))
exp_unique, exp_ind, exp_counts = np.unique(raw, axis=1, return_index=True, return_counts=True)
        # aggregate_size=3 spreads the unique aggregation across three chunks,
        # so the unique columns can come back in any order; sort them into a
        # canonical (lexicographic by column) order before comparing against
        # numpy's sorted output.
        raw_res_unique = res_unique
        res_unique_df = pd.DataFrame(res_unique)
        res_unique_ind = np.asarray(res_unique_df.sort_values(list(range(res_unique.shape[0])),
                                                              axis=1).columns)
        res_unique = res_unique[:, res_unique_ind]
        res_ind = res_ind[res_unique_ind]
        res_counts = res_counts[res_unique_ind]
np.testing.assert_array_equal(res_unique, exp_unique)
np.testing.assert_array_equal(res_ind, exp_ind)
np.testing.assert_array_equal(raw_res_unique[:, res_inv], raw)
np.testing.assert_array_equal(res_counts, exp_counts)
x = (mt.random.RandomState(0).rand(1000, chunk_size=20) > 0.5).astype(np.int32)
y = unique(x)
res = np.sort(self.executor.execute_tensor(y, concat=True)[0])
np.testing.assert_array_equal(res, np.array([0, 1]))
# test sparse
sparse_raw = sps.random(10, 3, density=0.1, format='csr', random_state=rs)
x = tensor(sparse_raw, chunk_size=2)
y = unique(x)
res = np.sort(self.executor.execute_tensor(y, concat=True)[0])
np.testing.assert_array_equal(res, np.unique(sparse_raw.data))
# test empty
x = tensor([])
y = unique(x)
res = self.executor.execute_tensor(y, concat=True)[0]
np.testing.assert_array_equal(res, np.unique([]))
x = tensor([[]])
y = unique(x)
res = self.executor.execute_tensor(y, concat=True)[0]
np.testing.assert_array_equal(res, np.unique([[]]))
@require_cupy
def testToGPUExecution(self):
raw = np.random.rand(10, 10)
x = tensor(raw, chunk_size=3)
gx = to_gpu(x)
res = self.executor.execute_tensor(gx, concat=True)[0]
np.testing.assert_array_equal(res.get(), raw)
@require_cupy
def testToCPUExecution(self):
raw = np.random.rand(10, 10)
x = tensor(raw, chunk_size=3, gpu=True)
cx = to_cpu(x)
res = self.executor.execute_tensor(cx, concat=True)[0]
np.testing.assert_array_equal(res, raw)
def testSortExecution(self):
# only 1 chunk when axis = -1
raw = np.random.rand(100, 10)
x = tensor(raw, chunk_size=10)
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
# 1-d chunk
raw = np.random.rand(100)
x = tensor(raw, chunk_size=10)
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
# test force need_align=True
sx = sort(x)
sx.op._need_align = True
res = self.executor.execute_tensor(sx, concat=True)[0]
self.assertEqual(get_tiled(sx).nsplits, get_tiled(x).nsplits)
np.testing.assert_array_equal(res, np.sort(raw))
# test psrs_kinds
sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
# structured dtype
raw = np.empty(100, dtype=[('id', np.int32), ('size', np.int64)])
raw['id'] = np.random.randint(1000, size=100, dtype=np.int32)
raw['size'] = np.random.randint(1000, size=100, dtype=np.int64)
x = tensor(raw, chunk_size=10)
sx = sort(x, order=['size', 'id'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, order=['size', 'id']))
# test psrs_kinds with structured dtype
sx = sort(x, order=['size', 'id'], psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, order=['size', 'id']))
# test flatten case
raw = np.random.rand(10, 10)
x = tensor(raw, chunk_size=5)
sx = sort(x, axis=None)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=None))
# test multi-dimension
raw = np.random.rand(10, 100)
x = tensor(raw, chunk_size=(2, 10))
sx = sort(x, psrs_kinds=['quicksort'] * 3)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
raw = np.random.rand(10, 99)
x = tensor(raw, chunk_size=(2, 10))
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
# test 3-d
raw = np.random.rand(20, 25, 28)
x = tensor(raw, chunk_size=(10, 5, 7))
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
sx = sort(x, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
sx = sort(x, axis=0)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=0))
sx = sort(x, axis=0, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=0))
sx = sort(x, axis=1)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=1))
sx = sort(x, axis=1, psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=1))
# test multi-dimension with structured type
raw = np.empty((10, 100), dtype=[('id', np.int32), ('size', np.int64)])
raw['id'] = np.random.randint(1000, size=(10, 100), dtype=np.int32)
raw['size'] = np.random.randint(1000, size=(10, 100), dtype=np.int64)
x = tensor(raw, chunk_size=(3, 10))
sx = sort(x)
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw))
sx = sort(x, order=['size', 'id'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, order=['size', 'id']))
sx = sort(x, order=['size'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, order=['size']))
sx = sort(x, axis=0, order=['size', 'id'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=0, order=['size', 'id']))
sx = sort(x, axis=0, order=['size', 'id'],
psrs_kinds=[None, None, 'quicksort'])
res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=0, order=['size', 'id']))
# test inplace sort
raw = np.random.rand(10, 12)
a = tensor(raw, chunk_size=(5, 4))
a.sort(axis=1)
res = self.executor.execute_tensor(a, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw, axis=1))
a.sort(axis=0)
res = self.executor.execute_tensor(a, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(np.sort(raw, axis=1), axis=0))
# test with empty chunk
raw = np.random.rand(20, 10)
raw[:, :8] = 1
a = tensor(raw, chunk_size=5)
filtered = a[a < 1]
filtered.sort()
res = self.executor.execute_tensor(filtered, concat=True)[0]
np.testing.assert_array_equal(res, np.sort(raw[raw < 1]))
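    # Note (added, hedged): PSRS = Parallel Sorting by Regular Sampling, the
    # distributed algorithm behind the chunked sort. psrs_kinds appears to pick
    # the sort kind for its three stages (local chunk sort, pivot-sample sort,
    # final per-partition sort), so [None, None, 'quicksort'] leaves the first
    # two stages at their defaults and quicksorts the exchanged partitions.
    # Every variant above is only asserted to agree with np.sort.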
def testSortIndicesExecution(self):
# only 1 chunk when axis = -1
raw = np.random.rand(100, 10)
x = tensor(raw, chunk_size=10)
r = sort(x, return_index=True)
sr, si = self.executor.execute_tensors(r)
np.testing.assert_array_equal(sr, np.take_along_axis(raw, si, axis=-1))
x = tensor(raw, chunk_size=(22, 4))
r = sort(x, return_index=True)
sr, si = self.executor.execute_tensors(r)
np.testing.assert_array_equal(sr, np.take_along_axis(raw, si, axis=-1))
raw = np.random.rand(100)
x = tensor(raw, chunk_size=23)
r = sort(x, axis=0, return_index=True)
sr, si = self.executor.execute_tensors(r)
np.testing.assert_array_equal(sr, raw[si])
def testArgsort(self):
# only 1 chunk when axis = -1
raw = np.random.rand(100, 10)
x = tensor(raw, chunk_size=10)
xa = argsort(x)
r = self.executor.execute_tensor(xa, concat=True)[0]
np.testing.assert_array_equal(np.sort(raw), np.take_along_axis(raw, r, axis=-1))
x = tensor(raw, chunk_size=(22, 4))
xa = argsort(x)
r = self.executor.execute_tensor(xa, concat=True)[0]
np.testing.assert_array_equal(np.sort(raw), np.take_along_axis(raw, r, axis=-1))
raw = np.random.rand(100)
x = tensor(raw, chunk_size=23)
xa = argsort(x, axis=0)
r = self.executor.execute_tensor(xa, concat=True)[0]
np.testing.assert_array_equal(np.sort(raw, axis=0), raw[r])
def testPartitionExecution(self):
# only 1 chunk when axis = -1
raw = np.random.rand(100, 10)
x = tensor(raw, chunk_size=10)
px = partition(x, [1, 8])
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res, np.partition(raw, [1, 8]))
# 1-d chunk
raw = np.random.rand(100)
x = tensor(raw, chunk_size=10)
kth = np.random.RandomState(0).randint(-100, 100, size=(10,))
px = partition(x, kth)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[kth], np.partition(raw, kth)[kth])
# structured dtype
raw = np.empty(100, dtype=[('id', np.int32), ('size', np.int64)])
raw['id'] = np.random.randint(1000, size=100, dtype=np.int32)
raw['size'] = np.random.randint(1000, size=100, dtype=np.int64)
x = tensor(raw, chunk_size=10)
px = partition(x, kth, order=['size', 'id'])
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(
res[kth], np.partition(raw, kth, order=['size', 'id'])[kth])
# test flatten case
raw = np.random.rand(10, 10)
x = tensor(raw, chunk_size=5)
px = partition(x, kth, axis=None)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[kth], np.partition(raw, kth, axis=None)[kth])
# test multi-dimension
raw = np.random.rand(10, 100)
x = tensor(raw, chunk_size=(2, 10))
kth = np.random.RandomState(0).randint(-10, 10, size=(3,))
px = partition(x, kth)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[:, kth], np.partition(raw, kth)[:, kth])
raw = np.random.rand(10, 99)
x = tensor(raw, chunk_size=(2, 10))
px = partition(x, kth)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[:, kth], np.partition(raw, kth)[:, kth])
# test 3-d
raw = np.random.rand(20, 25, 28)
x = tensor(raw, chunk_size=(10, 5, 7))
kth = np.random.RandomState(0).randint(-28, 28, size=(3,))
px = partition(x, kth)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(
res[:, :, kth], np.partition(raw, kth)[:, :, kth])
kth = np.random.RandomState(0).randint(-20, 20, size=(3,))
px = partition(x, kth, axis=0)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[kth], np.partition(raw, kth, axis=0)[kth])
kth = np.random.RandomState(0).randint(-25, 25, size=(3,))
px = partition(x, kth, axis=1)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(
res[:, kth], np.partition(raw, kth, axis=1)[:, kth])
# test multi-dimension with structured type
raw = np.empty((10, 100), dtype=[('id', np.int32), ('size', np.int64)])
raw['id'] = np.random.randint(1000, size=(10, 100), dtype=np.int32)
raw['size'] = np.random.randint(1000, size=(10, 100), dtype=np.int64)
x = tensor(raw, chunk_size=(3, 10))
kth = np.random.RandomState(0).randint(-100, 100, size=(10,))
px = partition(x, kth)
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(res[:, kth], np.partition(raw, kth)[:, kth])
px = partition(x, kth, order=['size', 'id'])
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(
res[:, kth], np.partition(raw, kth, order=['size', 'id'])[:, kth])
px = partition(x, kth, order=['size'])
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(
res[:, kth], np.partition(raw, kth, order=['size'])[:, kth])
kth = np.random.RandomState(0).randint(-10, 10, size=(5,))
px = partition(x, kth, axis=0, order=['size', 'id'])
res = self.executor.execute_tensor(px, concat=True)[0]
np.testing.assert_array_equal(
res[kth], np.partition(raw, kth, axis=0, order=['size', 'id'])[kth])
raw = np.random.rand(10, 12)
a = tensor(raw, chunk_size=(5, 4))
kth = np.random.RandomState(0).randint(-12, 12, size=(2,))
a.partition(kth, axis=1)
res = self.executor.execute_tensor(a, concat=True)[0]
np.testing.assert_array_equal(
res[:, kth], np.partition(raw, kth, axis=1)[:, kth])
kth = np.random.RandomState(0).randint(-10, 10, size=(2,))
a.partition(kth, axis=0)
raw_base = res
res = self.executor.execute_tensor(a, concat=True)[0]
np.testing.assert_array_equal(
res[kth], np.partition(raw_base, kth, axis=0)[kth])
# test kth which is tensor
raw = np.random.rand(10, 12)
a = tensor(raw, chunk_size=(3, 5))
kth = (mt.random.rand(5) * 24 - 12).astype(int)
px = partition(a, kth)
sx = sort(a)
res = self.executor.execute_tensor(px, concat=True)[0]
kth_res = self.executor.execute_tensor(kth, concat=True)[0]
sort_res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res[:, kth_res], sort_res[:, kth_res])
a = tensor(raw, chunk_size=(10, 12))
kth = (mt.random.rand(5) * 24 - 12).astype(int)
px = partition(a, kth)
sx = sort(a)
res = self.executor.execute_tensor(px, concat=True)[0]
kth_res = self.executor.execute_tensor(kth, concat=True)[0]
sort_res = self.executor.execute_tensor(sx, concat=True)[0]
np.testing.assert_array_equal(res[:, kth_res], sort_res[:, kth_res])
def testPartitionIndicesExecution(self):
# only 1 chunk when axis = -1
raw = np.random.rand(100, 10)
x = tensor(raw, chunk_size=10)
kth = [2, 5, 9]
r = partition(x, kth, return_index=True)
pr, pi = self.executor.execute_tensors(r)
        np.testing.assert_array_equal(pr, np.take_along_axis(raw, pi, axis=-1))
#!/usr/bin/env python
import numpy as np
from kernel_tuner import run_kernel
def fix_2n(c, N):
Xa_r, Xa_i, Xb_r, Xb_i = np.zeros((4, N//2), dtype=np.float32)
for i in range(1,N//2):
Xa_r[i] = (c[N-i].real + c[i].real) / 2
Xa_i[i] = (c[i].imag - c[N-i].imag) / 2
Xb_r[i] = (c[N-i].imag + c[i].imag) / 2
Xb_i[i] = (c[N-i].real - c[i].real) / 2
Xa_r[0] = c[0].real
Xb_r[0] = c[0].imag
return Xa_r, Xa_i, Xb_r, Xb_i
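# Added, hedged illustration: fix_2n implements the classic "two real FFTs
# from one complex FFT" trick. Pack signal a into the real part and signal b
# into the imaginary part of one complex FFT input, then unscramble the two
# real spectra via Hermitian symmetry: Xa_k = (c_k + conj(c_{N-k}))/2 and
# Xb_k = (c_k - conj(c_{N-k}))/(2i). The helper below is not in the original
# file; it checks the unscrambling against numpy's rfft (Nyquist bin dropped).
def check_fix_2n(N=16):
    a, b = np.random.normal(size=(2, N)).astype(np.float32)
    c = np.fft.fft(a + 1j * b)
    Xa_r, Xa_i, Xb_r, Xb_i = fix_2n(c, N)
    assert abs((Xa_r + 1j * Xa_i) - np.fft.rfft(a)[:-1]).max() < 1e-3
    assert abs((Xb_r + 1j * Xb_i) - np.fft.rfft(b)[:-1]).max() < 1e-3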
def test_2N_r2cfft():
N = 1024
signal1, signal2 = np.random.normal(size=(2, N)).astype(np.float32)
x = np.c_[signal1,signal2]
y = np.zeros_like(x)
_, y = run_kernel("fft_1024", "fft1024_mc_fma.cl", 1, [x, y], {"TESTING": 1, "block_size_x": 1024})
c = y[...,0]+1j*y[...,1]
Xa_r, Xa_i, Xb_r, Xb_i = fix_2n(c, N)
signal1_ans = Xa_r+1j*Xa_i
signal2_ans = Xb_r+1j*Xb_i
signal1_ref = np.fft.rfft(signal1)[:-1]
signal2_ref = np.fft.rfft(signal2)[:-1]
print(signal1_ans)
print(signal1_ref)
assert abs(signal1_ans - signal1_ref).max() < 1e-3
print(signal2_ans)
print(signal2_ref)
assert abs(signal2_ans - signal2_ref).max() < 1e-3
def test_2n():
N = 1024
signal1, signal2 = np.random.normal(size=(2, N)).astype(np.float32)
x = signal1 + 1j * signal2
y = np.fft.fft(x).astype(np.complex64)
Xa, Xb = np.zeros((2, N), dtype=np.float32)
_, Xa, Xb = run_kernel("test_fix_2n", "fft1024_mc_fma.cl", 1, [y, Xa, Xb], {"TESTING": 1, "block_size_x": 1})
signal1_ref = np.fft.rfft(signal1)[:-1]
    signal2_ref = np.fft.rfft(signal2)[:-1]
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
from Trace import Photon
from Geometry import Box, Cylinder, FinitePlane, transform_point, transform_direction, rotation_matrix_from_vector_alignment, norm
from Materials import Spectrum
def random_spherical_vector():
# This method of calculating isotropic vectors is taken from GNU Scientific Library
LOOP = True
while LOOP:
x = -1. + 2. * np.random.uniform()
y = -1. + 2. * np.random.uniform()
s = x**2 + y**2
if s <= 1.0:
LOOP = False
z = -1. + 2. * s
a = 2 * np.sqrt(1 - s)
x = a * x
y = a * y
return np.array([x,y,z])
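# Added sanity check (not in the original module): Marsaglia's method should
# yield unit-length vectors whose mean lies near the origin.
def check_isotropic(n=10000):
    vs = np.array([random_spherical_vector() for _ in range(n)])
    assert np.allclose(np.linalg.norm(vs, axis=1), 1.0)
    assert np.abs(vs.mean(axis=0)).max() < 0.05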
class SimpleSource(object):
"""A light source that will generate photons of a single colour, direction and position."""
def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, use_random_polarisation=False):
super(SimpleSource, self).__init__()
self.position = position
self.direction = direction
self.wavelength = wavelength
self.use_random_polarisation = use_random_polarisation
self.throw = 0
self.source_id = "SimpleSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.position = np.array(self.position)
photon.direction = np.array(self.direction)
photon.active = True
photon.wavelength = self.wavelength
# If use_polarisation is set generate a random polarisation vector of the photon
if self.use_random_polarisation:
            # Pick a random in-plane (xy) polarisation vector, then rotate it from the +z frame into the photon's direction
            vec = random_spherical_vector()
vec[2] = 0.
vec = norm(vec)
R = rotation_matrix_from_vector_alignment(self.direction, [0,0,1])
photon.polarisation = transform_direction(vec, R)
else:
photon.polarisation = None
photon.id = self.throw
self.throw = self.throw + 1
return photon
class Laser(object):
"""A light source that will generate photons of a single colour, direction and position."""
def __init__(self, position=[0,0,0], direction=[0,0,1], wavelength=555, polarisation=None):
super(Laser, self).__init__()
self.position = np.array(position)
self.direction = np.array(direction)
self.wavelength = wavelength
        assert polarisation is not None, "Polarisation of the Laser is not set."
self.polarisation = np.array(polarisation)
self.throw = 0
self.source_id = "LaserSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.position = np.array(self.position)
photon.direction = np.array(self.direction)
photon.active = True
photon.wavelength = self.wavelength
photon.polarisation = self.polarisation
photon.id = self.throw
self.throw = self.throw + 1
return photon
class PlanarSource(object):
"""A box that emits photons from the top surface (normal), sampled from the spectrum."""
def __init__(self, spectrum=None, wavelength=555, direction=(0,0,1), length=0.05, width=0.05):
super(PlanarSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.plane = FinitePlane(length=length, width=width)
self.length = length
self.width = width
# direction is the direction that photons are fired out of the plane in the GLOBAL FRAME.
        # i.e. it is passed directly to the photon to set its direction
self.direction = direction
self.throw = 0
self.source_id = "PlanarSource_" + str(id(self))
def translate(self, translation):
self.plane.append_transform(tf.translation_matrix(translation))
def rotate(self, angle, axis):
self.plane.append_transform(tf.rotation_matrix(angle, axis))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
        # Create a point which is on the surface of the finite plane in its local frame
x = np.random.uniform(0., self.length)
y = np.random.uniform(0., self.width)
local_point = (x, y, 0.)
        # Transform the local point into the global frame
photon.position = transform_point(local_point, self.plane.transform)
photon.direction = self.direction
photon.active = True
        if self.spectrum is not None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
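# Hedged usage sketch (added), assuming the Trace/Geometry imports above:
#
#   source = PlanarSource(wavelength=600., direction=(0, 0, 1), length=0.05, width=0.05)
#   source.translate((0., 0., 0.01))   # move the emitting plane
#   p = source.photon()                # Photon on the plane, fired along +z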
class LensSource(object):
"""
A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize".
The focus line should be perpendicular to the plane normal and aligned with the z-axis.
"""
def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)):
super(LensSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.planeorigin = planeorigin
self.planeextent = planeextent
self.linepoint = np.array(linepoint)
self.linedirection = np.array(linedirection)
self.focussize = focussize
self.throw = 0
self.source_id = "LensSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
# Position
x = np.random.uniform(self.planeorigin[0],self.planeextent[0])
y = np.random.uniform(self.planeorigin[1],self.planeextent[1])
z = np.random.uniform(self.planeorigin[2],self.planeextent[2])
photon.position = np.array((x,y,z))
# Direction
focuspoint = np.array((0.,0.,0.))
focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[2] = photon.position[2]
direction = focuspoint - photon.position
modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5
photon.direction = direction/modulus
# Wavelength
        if self.spectrum is not None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class LensSourceAngle(object):
"""
A source where photons generated in a plane are focused on a line with space tolerance given by variable "focussize".
The focus line should be perpendicular to the plane normal and aligned with the z-axis.
    For this lens an additional z-boost is added (angle of incidence in the z-direction).
"""
def __init__(self, spectrum = None, wavelength = 555, linepoint=(0,0,0), linedirection=(0,0,1), angle = 0, focussize = 0, planeorigin = (-1,-1,-1), planeextent = (-1,1,1)):
super(LensSourceAngle, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.planeorigin = planeorigin
self.planeextent = planeextent
self.linepoint = np.array(linepoint)
self.linedirection = np.array(linedirection)
self.focussize = focussize
self.angle = angle
self.throw = 0
self.source_id = "LensSourceAngle_" + str(id(self))
def photon(self):
photon = Photon()
photon.id = self.throw
self.throw = self.throw + 1
# Position
x = np.random.uniform(self.planeorigin[0],self.planeextent[0])
y = np.random.uniform(self.planeorigin[1],self.planeextent[1])
boost = y*np.tan(self.angle)
z = np.random.uniform(self.planeorigin[2],self.planeextent[2]) - boost
photon.position = np.array((x,y,z))
# Direction
focuspoint = np.array((0.,0.,0.))
focuspoint[0] = self.linepoint[0] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[1] = self.linepoint[1] + np.random.uniform(-self.focussize,self.focussize)
focuspoint[2] = photon.position[2] + boost
direction = focuspoint - photon.position
modulus = (direction[0]**2+direction[1]**2+direction[2]**2)**0.5
photon.direction = direction/modulus
# Wavelength
        if self.spectrum is not None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
return photon
class CylindricalSource(object):
"""
A source for photons emitted in a random direction and position inside a cylinder(radius, length)
"""
def __init__(self, spectrum = None, wavelength = 555, radius = 1, length = 10):
super(CylindricalSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.shape = Cylinder(radius = radius, length = length)
self.radius = radius
self.length = length
self.throw = 0
self.source_id = "CylindricalSource_" + str(id(self))
def translate(self, translation):
self.shape.append_transform(tf.translation_matrix(translation))
def rotate(self, angle, axis):
self.shape.append_transform(tf.rotation_matrix(angle, axis))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
# Position of emission
phi = np.random.uniform(0., 2*np.pi)
r = np.random.uniform(0.,self.radius)
x = r*np.cos(phi)
y = r*np.sin(phi)
z = np.random.uniform(0.,self.length)
local_center = (x,y,z)
photon.position = transform_point(local_center, self.shape.transform)
        # Direction of emission: isotropy requires uniform cos(theta), not
        # uniform theta (no need to transform out of the local frame here)
        phi = np.random.uniform(0., 2*np.pi)
        theta = np.arccos(np.random.uniform(-1., 1.))
x = np.cos(phi)*np.sin(theta)
y = np.sin(phi)*np.sin(theta)
z = np.cos(theta)
local_direction = (x,y,z)
photon.direction = local_direction
# Set wavelength of photon
        if self.spectrum is not None:
photon.wavelength = self.spectrum.wavelength_at_probability(np.random.uniform())
else:
photon.wavelength = self.wavelength
# Further initialisation
photon.active = True
return photon
class PointSource(object):
"""
A point source that emits randomly in solid angle specified by phimin, ..., thetamax
"""
def __init__(self, spectrum = None, wavelength = 555, center = (0.,0.,0.), phimin = 0, phimax = 2*np.pi, thetamin = 0, thetamax = np.pi):
super(PointSource, self).__init__()
self.spectrum = spectrum
self.wavelength = wavelength
self.center = center
self.phimin = phimin
self.phimax = phimax
self.thetamin = thetamin
self.thetamax = thetamax
self.throw = 0
self.source_id = "PointSource_" + str(id(self))
def photon(self):
photon = Photon()
photon.source = self.source_id
photon.id = self.throw
self.throw = self.throw + 1
        phi = np.random.uniform(self.phimin, self.phimax)
import pytheia as pt
import numpy as np
import cv2
from scipy.spatial.transform import Rotation as R
import time
dtype = np.float64
nr_runs = 100000
Deg2Rad = np.pi/180.0
r1 = 15 * Deg2Rad
r2 = -10 * Deg2Rad
R1 = R.from_rotvec(np.array([r1, 0, 0]))
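# Hedged sketch (added): the setup above suggests timing rotation composition;
# a scipy-only baseline under that assumption (R2 and the print are mine):
R2 = R.from_rotvec(np.array([0, r2, 0]))
start = time.time()
for _ in range(nr_runs):
    _ = (R1 * R2).as_matrix()
print("scipy compose + as_matrix: %.3fs for %d runs" % (time.time() - start, nr_runs))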
import pytest
from py_scripts.singleton_calling_utils import get_good_idxs, get_singleton_idx, levenshtein, reformat_genotypes, normalize_var
import numpy as np
def test_reformat_genotypes(genotype_array):
assert np.array_equal(reformat_genotypes(genotype_array), np.array([0, 1, 1, 2]))
def test_reformat_genotypes_with_unk(genotype_array_with_unk):
assert np.array_equal(reformat_genotypes(genotype_array_with_unk), np.array([-1, 1, -1, 2]))
@pytest.mark.parametrize('invara,invarb,outvar', [
('TCC', 'CCC', ('T', 'C')),
('CAGGGGG', 'CTGGGGG', ('A', 'T')),
('GAT', 'GAT', ('N', 'N')),
])
def test_normalize_var(invara, invarb, outvar):
assert normalize_var(invara, invarb) == outvar
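# Reference sketch (added): the imported normalize_var isn't shown here; an
# implementation consistent with the cases above reduces two same-length
# alleles to their single differing base, or ('N', 'N') when identical.
def normalize_var_reference(a, b):
    diffs = [(ca, cb) for ca, cb in zip(a, b) if ca != cb]
    return diffs[0] if len(diffs) == 1 else ('N', 'N')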
@pytest.mark.parametrize('gt,dp,gq,idxs', [
(
np.array([0, 0, 2, 0]),
np.array([20, 4, 99, 33]),
np.array([15, 9, 22, 4]),
np.array([0, 2]),
),
(
np.array([0, -1, -1, 1]),
np.array([20, -1, -1, 47]),
np.array([15, -1, -1, 23]),
np.array([0, 3]),
),
(
np.array([0, 0, 0, 0]),
np.array([55, 99, 10, 34]),
np.array([5, 9, 23, 56]),
np.array([3]),
),
])
def test_get_good_idxs(gt, dp, gq, idxs):
assert np.array_equal(get_good_idxs(gt, dp, gq), idxs)
@pytest.mark.parametrize('seqa,seqb,dist', [
('AAA', 'ATA', 1),
('ATTTT', 'CAAAA', 5),
])
def test_levenshtein(seqa, seqb, dist):
assert levenshtein(seqa, seqb) == dist
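# Reference sketch (added): the imported levenshtein isn't shown here; a
# standard dynamic-programming edit distance consistent with these cases:
def levenshtein_reference(a, b):
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,            # deletion
                           cur[j - 1] + 1,         # insertion
                           prev[j - 1] + (ca != cb)))  # substitution
        prev = cur
    return prev[-1]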
@pytest.mark.parametrize('gt,rd,ad,idx', [
(
np.array([0, 0, 2, 0]),
np.array([0, 0, 12, 0]),
np.array([15, 9, 0, 18]),
2,
),
(
np.array([0, 0, 2, 1]),
np.array([0, 0, 12, 4]),
np.array([15, 9, 0, 8]),
None,
),
(
        np.array([0, 0, 0, 1]),
#!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import os.path
import scipy,scipy.spatial
import matplotlib
matplotlib.rcParams['figure.dpi'] = 100
from data_utilities import *
# from definitions import *
# from run_train_eval_net import run_train_eval_net,run_eval_net
# In[2]:
import os
GPU = "1"
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=GPU
# In[3]:
dataset_name = 'ManyTx'
dataset_path='../../orbit_rf_dataset/data/compact_pkl_datasets/'
compact_dataset = load_compact_pkl_dataset(dataset_path,dataset_name)
tx_list = compact_dataset['tx_list']
rx_list = compact_dataset['rx_list']
equalized = 0
capture_date_list = compact_dataset['capture_date_list']
n_tx = len(tx_list)
n_rx = len(rx_list)
print(n_tx,n_rx)
# In[4]:
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import regularizers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import *
import tensorflow.keras.backend as K
# In[5]:
def create_net(ntx_i):
inputs = Input(shape=(256,2))
x = Reshape((256,2,1))(inputs)
x = Conv2D(8,(3,2),activation='relu',padding = 'same')(x)
x = MaxPool2D((2,1))(x)
x = Conv2D(16,(3,2),activation='relu',padding = 'same')(x)
x = MaxPool2D((2,1))(x)
x = Conv2D(16,(3,2),activation='relu',padding = 'same')(x)
x = MaxPool2D((2,2))(x)
x = Conv2D(32,(3,1),activation='relu',padding = 'same')(x)
x = MaxPool2D((2,1))(x)
x = Conv2D(16,(3,1),activation='relu',padding = 'same')(x)
#x = resnet(x,64,(3,2),'6')
#x = MaxPool2D((2,2))(x)
x = Flatten()(x)
x = Dense(100, activation='relu', kernel_regularizer = keras.regularizers.l2(0.0001))(x)
# x = Dropout(0.3)(x)
x = Dense(80, activation='relu',kernel_regularizer = keras.regularizers.l2(0.0001))(x)
x = Dropout(0.5)(x)
x = Dense(ntx_i, activation='softmax',kernel_regularizer = keras.regularizers.l2(0.0001))(x)
ops = x
classifier = Model(inputs,ops)
classifier.compile(loss='categorical_crossentropy',metrics=['categorical_accuracy'],optimizer=keras.optimizers.Adam(0.0005))
return classifier
classifier = create_net(5)
classifier.summary()
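# Hedged smoke test (added): input shape follows the model above -- 256 samples
# by 2 (I/Q) channels -- with 5 classes; the data here is random, only to
# confirm the graph trains end to end.
X_smoke = np.random.randn(32, 256, 2).astype(np.float32)
y_smoke = keras.utils.to_categorical(np.random.randint(5, size=32), num_classes=5)
classifier.fit(X_smoke, y_smoke, epochs=1, batch_size=16, verbose=0)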
# In[6]:
def evaluate_test(classifier):
pred = classifier.predict(sig_dfTest)
acc = np.mean(np.argmax(pred,1)==txidNum_dfTest)
test_indx = ()
for indx in range(len(tx_list)):
cls_indx = np.where(txidNum_dfTest == indx)
test_indx = test_indx + (cls_indx[0][:n_test_samples],)
test_indx = np.concatenate(test_indx)
    # balanced accuracy over an equal number of test samples per class
    # (comparison target inferred from the `acc` line above)
    acc_bal = np.mean(np.argmax(pred[test_indx, :], 1) == txidNum_dfTest[test_indx])